guqing 4 weeks ago
commit
0d88153b4c

+ 137 - 0
depoly_modify/client.py

@@ -0,0 +1,137 @@
+import requests
+import argparse
+import os
+import sys
+import time
+import numpy as np
+
+# Define the server URL
+SERVER_URL = "http://192.168.9.51:8888"
+
+
+def save_results(results: dict, output_dir: str):
+    """Saves the numpy arrays received from the server."""
+    if not results or "segment" not in results or "pred" not in results:
+        print("Error: The server response did not contain the expected 'segment' or 'pred' arrays.", file=sys.stderr)
+        return
+
+    if not os.path.exists(output_dir):
+        os.makedirs(output_dir)
+        print(f"Created output directory: {output_dir}")
+
+    try:
+        # Convert lists back to numpy arrays
+        segment_array = np.array(results['segment'])
+        pred_array = np.array(results['pred'])
+
+        # Save the .npy files
+        segment_path = os.path.join(output_dir, 'segment.npy')
+        pred_path = os.path.join(output_dir, 'pred.npy')
+
+        np.save(segment_path, segment_array)
+        print(f"Successfully saved: {segment_path}")
+
+        np.save(pred_path, pred_array)
+        print(f"Successfully saved: {pred_path}")
+
+    except Exception as e:
+        print(f"An error occurred while saving the result arrays: {e}", file=sys.stderr)
+
+
+def poll_status(task_id: str, output_dir: str):
+    """Polls the server for the task status and retrieves the results."""
+    status_url = f"{SERVER_URL}/status/{task_id}"
+
+    print(f"\nPolling for status updates...")
+
+    while True:
+        try:
+            response = requests.get(status_url, timeout=10)
+            response.raise_for_status()
+
+            data = response.json()
+            status = data.get("status")
+            progress = data.get("progress")
+
+            # Display progress on a single line
+            sys.stdout.write(f"\rStatus: {status} | Progress: {progress}      ")
+            sys.stdout.flush()
+
+            if status == "completed":
+                print("\n\nTask complete. Retrieving results...")
+                save_results(data.get("results"), output_dir)
+                break
+            elif status == "error":
+                print(f"\n\nTask failed on the server with an error: {progress}", file=sys.stderr)
+                break
+
+            time.sleep(3)
+
+        except requests.exceptions.RequestException as e:
+            print(f"\nAn error occurred while polling for status: {e}", file=sys.stderr)
+            break
+        except KeyboardInterrupt:
+            print("\nPolling stopped by user.")
+            break
+
+
+def upload_and_start_task(zip_path: str):
+    """Uploads the scene zip file to the server to start the processing task."""
+    start_url = f"{SERVER_URL}/run_test/"
+
+    if not os.path.exists(zip_path) or not zip_path.endswith('.zip'):
+        print(f"Error: The specified file does not exist or is not a .zip file: '{zip_path}'", file=sys.stderr)
+        return None
+
+    print(f"Uploading {os.path.basename(zip_path)} to the server to start processing...")
+
+    try:
+        with open(zip_path, 'rb') as f:
+            files = {'file': (os.path.basename(zip_path), f, 'application/zip')}
+            response = requests.post(start_url, files=files, timeout=60)  # Increased timeout for upload
+
+        response.raise_for_status()
+
+        task_id = response.json().get("task_id")
+        if task_id:
+            print(f"File uploaded successfully. Task started with ID: {task_id}")
+            return task_id
+        else:
+            print("Error: The server did not return a valid task ID.", file=sys.stderr)
+            return None
+
+    except requests.exceptions.ConnectionError:
+        print(f"\nConnection Error: Could not connect to the server at {SERVER_URL}.", file=sys.stderr)
+        print("Please ensure 'server.py' is running and accessible.", file=sys.stderr)
+        return None
+    except requests.exceptions.RequestException as e:
+        print(f"\nAn error occurred during file upload: {e}", file=sys.stderr)
+        return None
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(
+        description="输入单个场景文件夹用于向服务器传输数据",
+        formatter_class=argparse.RawTextHelpFormatter
+    )
+    parser.add_argument(
+        '-i',
+        '--input_folder',
+        type=str,
+        required=True,
+        help='Path to the input scene folder.'
+    )
+    args = parser.parse_args()
+    scene_folder = args.input_folder
+
+    DEBUG_ZIP_FILE = os.path.join(scene_folder, "scene.zip")
+    DEBUG_OUTPUT_DIR = os.path.join(scene_folder, "output")
+    if not os.path.exists(DEBUG_OUTPUT_DIR):
+        os.makedirs(DEBUG_OUTPUT_DIR)
+
+    zip_file_path = DEBUG_ZIP_FILE
+    output_directory = DEBUG_OUTPUT_DIR
+    print(f"DEBUG模式启用: 使用 ZIP文件: {zip_file_path}, 输出目录: {output_directory}")
+    task_id = upload_and_start_task(zip_file_path)
+    if task_id:
+        poll_status(task_id, output_directory)
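For reference, a minimal sketch of the JSON payload client.py expects from GET /status/<task_id>, inferred from the polling and saving logic above; the actual server schema may differ:

    # Hypothetical illustration; field names inferred from client.py
    completed_response = {
        "status": "completed",        # the client also reacts to "error"; anything else keeps polling
        "progress": "100%",           # free-form progress value shown while polling
        "results": {
            "segment": [[0, 1, 2]],   # nested lists, converted back with np.array(...)
            "pred": [12, 12, 9],
        },
    }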

+ 284 - 0
depoly_modify/denosie_downsampling_ply.py

@@ -0,0 +1,284 @@
+import open3d as o3d
+import numpy as np
+import os
+import shutil
+import zipfile
+import json
+from typing import Dict, Any
+import cv2
+import argparse
+
+# from pointclound_downsampling import run
+# Z-axis thresholds used to truncate the roof and floor.
+# !!! Adjust these values to match the characteristics of your point cloud data !!!
+Z_MAX_THRESHOLD = 0.8  # assumes the highest wall point is below 0.8 m
+Z_MIN_THRESHOLD = -1.7
+
+
+def generate_floor_json(width: int, height: int, floor_id: int = 0, name: str = "1楼") -> Dict[str, Any]:
+    x_bound = width / 100.0
+    y_bound = height / 100.0
+
+    floor_data = {
+        "id": floor_id,
+        "subgroup": 0,
+        "name": name,
+        "resolution": {
+            "width": width,
+            "height": height
+        },
+        "bound": {
+            # Bounds are symmetric from negative to positive values
+            "x_min": -x_bound,
+            "x_max": x_bound,
+            "y_min": -y_bound,
+            "y_max": y_bound
+        }
+    }
+
+    return {
+        "floors": [floor_data]
+    }
+
+
+def save_json_to_file(data: Dict[str, Any], filename: str = "output.json"):
+    """
+    Write a Python dict to a pretty-printed JSON file.
+    """
+    try:
+        with open(filename, 'w', encoding='utf-8') as f:
+            # indent=4 pretty-prints the output; ensure_ascii=False keeps non-ASCII text readable
+            json.dump(data, f, indent=4, ensure_ascii=False)
+        print(f"✅ JSON file saved successfully: {filename}")
+    except IOError as e:
+        print(f"❌ Error while writing the file: {e}")
+
+
+def build_floor_transform_matrix(j_info: dict, floor_id: int):
+    """
+    Iterate over the floor entries in the JSON data, find the one matching floor_id, and build a 3x3 affine transform matrix.
+    This version returns the inverse matrix, used to convert world coordinates to normalized coordinates.
+    """
+    tab = [[0.0] * 3 for _ in range(3)]
+    res_width = None
+    res_height = None
+
+    for in_json in j_info.get("floors", []):
+        floor_id_in_json = in_json.get("id")
+
+        if floor_id_in_json != floor_id:
+            continue
+
+        res_width = in_json.get("resolution", {}).get("width")
+        res_height = in_json.get("resolution", {}).get("height")
+
+        x_min = in_json.get("bound", {}).get("x_min")
+        x_max = in_json.get("bound", {}).get("x_max")
+        y_min = in_json.get("bound", {}).get("y_min")
+        y_max = in_json.get("bound", {}).get("y_max")
+
+        # Maps normalized [0, 1] space to world coordinates
+        tab[0][0] = x_max - x_min
+        tab[0][1] = 0.0
+        tab[0][2] = x_min
+
+        tab[1][0] = 0.0
+        tab[1][1] = y_min - y_max
+        tab[1][2] = y_max
+
+        tab[2][0] = 0.0
+        tab[2][1] = 0.0
+        tab[2][2] = 1.0
+
+        break
+
+    if res_width is None:
+        # Floor not found: return the identity matrix and None
+        return np.identity(3).tolist(), None, None
+
+    # Invert the matrix
+    tab_array = np.array(tab, dtype=np.float64)
+    if np.linalg.det(tab_array) == 0:
+        raise ValueError("矩阵是奇异的,无法求逆。")
+
+    tab_inverse_array = np.linalg.inv(tab_array)
+    tab_inverse = tab_inverse_array.tolist()
+    return tab_inverse, res_width, res_height
+
+
+def zip_folder(folder_path, output_path):
+    with zipfile.ZipFile(output_path, 'w', zipfile.ZIP_DEFLATED) as zipf:
+        for root, dirs, files in os.walk(folder_path):
+            for file in files:
+                file_path = os.path.join(root, file)
+                zipf.write(file_path, os.path.relpath(file_path, folder_path))
+
+
+def extract_ply_data(ply_file_path, output_dir, scene_info_json_path, floor_id=0, voxel_size=0.03):
+    # 1. Read the PLY file
+    print(f"Reading file: {ply_file_path}...")
+    pcd = o3d.io.read_point_cloud(ply_file_path)
+    original_num_points = len(pcd.points)
+    print(f" - Original point count: {original_num_points}")
+
+    max_z = pcd.get_max_bound()[2]
+    min_z = pcd.get_min_bound()[2]
+    print(f" - Original Z range: [{min_z:.4f}, {max_z:.4f}]")
+
+    # 1.1. Load the transform matrix
+    if not os.path.exists(scene_info_json_path):
+        raise FileNotFoundError(f"Scene info JSON file not found: {scene_info_json_path}")
+    with open(scene_info_json_path, 'r', encoding='utf-8') as f:
+        j_info = json.load(f)
+
+    matrix, res_w, res_h = build_floor_transform_matrix(j_info, floor_id)
+    if res_w is None:
+        raise ValueError(f"在 {scene_info_json_path} 中找不到 floor_id {floor_id}")
+
+    M = np.array(matrix, dtype=np.float64)
+
+    # 1.2. Roof and floor truncation: crop by the Z axis and the XY projection
+    print(f"Truncating (Z axis and XY plane)...")
+
+    # Convert to a NumPy array so boolean masks can be built
+    points_np = np.asarray(pcd.points)
+
+    # Boolean mask for the Z-axis filter
+    z_mask = (points_np[:, 2] < Z_MAX_THRESHOLD) & (points_np[:, 2] > Z_MIN_THRESHOLD)
+
+    # Boolean mask for the X/Y filter
+    homogeneous_points = np.hstack((points_np[:, :2], np.ones((points_np.shape[0], 1))))
+    normalized_points = homogeneous_points @ M.T
+    pixel_coords = normalized_points[:, :2] * np.array([res_w, res_h])
+    xy_mask = (pixel_coords[:, 0] >= 0) & (pixel_coords[:, 0] < res_w) & \
+              (pixel_coords[:, 1] >= 0) & (pixel_coords[:, 1] < res_h)
+
+    # Combine the masks
+    combined_mask = z_mask & xy_mask
+    inlier_indices = np.where(combined_mask)[0]
+
+    # Use select_by_index so all point attributes are cropped consistently
+    pcd = pcd.select_by_index(inlier_indices)
+
+    cut_num_points = len(pcd.points)
+    print(f" - Point count after truncation: {cut_num_points}")
+
+    ######################################################
+    # # # 1.2. (Alternative) down-sample to 1/10 of the truncated point count
+    # sampling_ratio = 0.1
+    # pcd = pcd.random_down_sample(sampling_ratio=sampling_ratio)
+    ######################################################
+
+    ######################################################
+    # voxel_size = 0.03  # 0.02
+    pcd = pcd.voxel_down_sample(voxel_size=voxel_size)
+    print(f"Voxel down-sampling done with voxel size: {voxel_size}")
+
+    # MAX_POINTS = 550000
+    MAX_POINTS = 500000
+    num_points_after_voxel = len(pcd.points)
+    print(f"Point count after voxel down-sampling: {num_points_after_voxel}")
+
+    if num_points_after_voxel > MAX_POINTS:
+        print(f"  - Still more than {MAX_POINTS} points, running second-stage random down-sampling...")
+        indices_to_keep = np.random.choice(num_points_after_voxel, MAX_POINTS, replace=False)
+        pcd = pcd.select_by_index(indices_to_keep)
+        print(f"  - Second-stage down-sampling done, final point count: {len(pcd.points)}")
+    ######################################################
+
+    # o3d.io.write_point_cloud(output_dir+'/1.ply', pcd, write_ascii=True)
+
+    ################************##########################
+    # pcd = run(ply_file_path, "random")
+    ################************###########################
+
+    downsampled_num_points = len(pcd.points)
+    print(f" - 下采样后点数: {downsampled_num_points}")
+    print(f" - 实际保留比例 (相对于原始点数): {downsampled_num_points / original_num_points:.4f}")
+
+    # 2. Extract the data and convert it to NumPy arrays
+
+    # Extract coordinates
+    points = np.asarray(pcd.points)
+    points_npy_path = os.path.join(output_dir, 'coord.npy')
+    np.save(points_npy_path, points)
+    print(f" - Coordinates (shape: {points.shape}) saved to: {points_npy_path}")
+
+    # Extract colors
+    if pcd.has_colors():
+        colors = np.asarray(pcd.colors)
+        colors_npy_path = os.path.join(output_dir, 'color.npy')
+        np.save(colors_npy_path, colors)
+        print(f" - Colors (shape: {colors.shape}, range: [0, 1]) saved to: {colors_npy_path}")
+    else:
+        # This warning is expected when the PLY file carries no color information.
+        print(" - Warning: the PLY file contains no color information, skipping save.")
+        colors = None
+
+    # Extract normals
+    if pcd.has_normals():
+        normals = np.asarray(pcd.normals)
+        normals_npy_path = os.path.join(output_dir, 'normal.npy')
+        np.save(normals_npy_path, normals)
+        print(f" - Normals (shape: {normals.shape}) saved to: {normals_npy_path}")
+    else:
+        # This warning is expected when the PLY file carries no normal information.
+        print(" - Warning: the PLY file contains no normal information, skipping save.")
+        normals = None
+
+    return points, colors, normals
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(
+        description="输入单个场景文件夹用于点云提取,分割和去噪",
+        formatter_class=argparse.RawTextHelpFormatter
+    )
+    parser.add_argument(
+        '-i',
+        '--input_folder',
+        type=str,
+        required=True,
+        help='Path to the input scene folder.'
+    )
+    args = parser.parse_args()
+    scene_folder = args.input_folder
+    scenece = os.path.basename(scene_folder)
+
+    ply_file_path = os.path.join(scene_folder, "laser.ply")
+    scene_info_json_path = os.path.join(scene_folder, f"{scenece}.json")
+    floor_path = os.path.join(scene_folder, f"{scenece}.png")
+    process_folder = os.path.join(scene_folder, 'scene/val/process_data')
+
+    if not os.path.exists(process_folder):
+        os.makedirs(process_folder)
+
+    if not os.path.exists(ply_file_path):
+        raise FileNotFoundError(f"[Missing file] Point cloud file not found: '{ply_file_path}'. Please check the path.")
+    print(f"✅ File '{ply_file_path}' exists")
+
+    if not os.path.exists(floor_path):
+        raise FileNotFoundError(f"[Missing file] Floor plan file not found: '{floor_path}'. Please check the path.")
+    print(f"✅ File '{floor_path}' exists")
+
+    if not os.path.exists(scene_info_json_path):
+        img = cv2.imread(floor_path)
+        height, width, _ = img.shape
+        json_data = generate_floor_json(width, height)
+        save_json_to_file(json_data, filename=scene_info_json_path)
+
+    extract_ply_data(
+        ply_file_path=ply_file_path,
+        output_dir=process_folder,
+        scene_info_json_path=scene_info_json_path,
+        floor_id=0,
+        voxel_size=0.03
+    )
+
+    data_file = os.path.join(scene_folder, "scene")
+    zip_file = data_file + '.zip'
+    zip_folder(data_file, zip_file)
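As a side note, a minimal sketch (illustrative only, not part of the commit) of how the inverse matrix returned by build_floor_transform_matrix maps a world XY point onto floor-plan pixels, mirroring the masking step in extract_ply_data; the resolution values here are made up:

    import numpy as np
    j_info = generate_floor_json(width=400, height=300)      # hypothetical floor-plan size
    matrix, res_w, res_h = build_floor_transform_matrix(j_info, floor_id=0)
    M = np.array(matrix, dtype=np.float64)
    world_pt = np.array([1.0, -0.5, 1.0])                    # homogeneous world XY in metres
    norm = M @ world_pt                                      # normalized [0, 1] floor coordinates
    px, py = norm[0] * res_w, norm[1] * res_h                # pixel coordinates on the floor plan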

+ 2 - 0
depoly_modify/depoly_information.txt

@@ -0,0 +1,2 @@
+chmod +x run_all.sh
+./run_all.sh /path/to/your/input/folder
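For context, a rough sketch of the input-folder layout these scripts expect, inferred from the paths used in the code (names are illustrative):

    <input_folder>/
        laser.ply                  # raw point cloud read by denosie_downsampling_ply.py
        <input_folder_name>.png    # floor-plan image
        <input_folder_name>.json   # floor metadata (generated from the PNG if missing)
        scene/val/process_data/    # coord.npy / color.npy / normal.npy written here
        scene.zip                  # archive uploaded by client.py
        output/                    # segment.npy / pred.npy returned by the server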

+ 38 - 0
depoly_modify/run_all.sh

@@ -0,0 +1,38 @@
+#!/bin/bash
+set -e
+
+if [ "$#" -ne 1 ]; then
+    echo "--------------------------------------------------------"
+    echo "❌ 错误: 必须提供一个点云场景路径。"
+    echo "用法: $0 <输入文件夹路径>"
+    echo "--------------------------------------------------------"
+    exit 1
+fi
+
+INPUT_FOLDER="$1"
+
+echo "========================================================"
+echo "🚀 算法点云处理任务开始启动。!!!"
+echo "👉 输入文件夹路径: ${INPUT_FOLDER}"
+echo "========================================================"
+
+
+echo ""
+echo "--- [步骤 1/3] 正在执行 denosie_downsampling_ply.py (点云前处理阶段) ---"
+python denosie_downsampling_ply.py -i "${INPUT_FOLDER}"
+echo "✅ 点云采样和去噪执行完成。"
+
+echo ""
+echo "--- [步骤 2/3] 正在执行 client.py (服务器计算阶段) ---"
+python client.py -i "${INPUT_FOLDER}"
+echo "✅ 服务器计算执行完成。"
+
+echo ""
+echo "--- [步骤 3/3] 正在执行 save_result.py (结果生成阶段) ---"
+python save_result.py -i "${INPUT_FOLDER}"
+echo "✅ 结果生成执行完成。"
+
+echo ""
+echo "========================================================"
+echo "🎉 恭喜!所有脚本已成功执行完毕。!!!"
+echo "========================================================"

+ 111 - 0
depoly_modify/save_ply.py

@@ -0,0 +1,111 @@
+import numpy as np
+import open3d as o3d
+import os
+import time
+import json
+import argparse
+
+def save_point_cloud_by_class(coords_file, preds_file, classes_to_show, save_pcd_path):
+    """
+    Load point coordinates and predicted classes, filter the requested classes, and save them as a .ply file.
+    """
+    # 1. Define the class names and color map
+    CLASS_NAMES = [
+        'refrigerator', 'desk', 'curtain', 'sofa', 'bookshelf', 'bed',
+        'table', 'window', 'cabinet', 'door', 'chair', 'floor', 'wall',
+        'sink', 'toilet', 'bathtub', 'shower curtain', 'picture', 'counter'
+    ]
+    COLOR_MAP = np.array([
+        [174, 199, 232], [255, 127, 14], [152, 223, 138], [214, 39, 40],
+        [148, 103, 189], [255, 187, 120], [140, 86, 75], [152, 223, 138],
+        [152, 223, 138], [152, 223, 138], [196, 156, 148], [127, 127, 127],
+        [152, 223, 138], [188, 189, 34], [219, 219, 141], [227, 119, 194],
+        [31, 119, 180], [255, 152, 150], [82, 84, 163]
+    ])
+
+    # 2. Load the data
+    try:
+        print(f"Loading coordinate file: {coords_file}")
+        coords = np.load(coords_file)
+        print(f"Loading prediction file: {preds_file}")
+        predictions = np.load(preds_file)
+    except FileNotFoundError as e:
+        print(f"Error: file {e.filename} not found. Please check the path.")
+        return
+
+    print(f"Original point count: {len(coords)}")
+    if len(coords) != len(predictions):
+        print("Warning: the number of points and prediction labels do not match!")
+        return
+
+    # 3. Filter the point cloud by 'classes_to_show'
+    if isinstance(classes_to_show, str):
+        target_classes = [classes_to_show]
+    elif isinstance(classes_to_show, list):
+        target_classes = classes_to_show
+    else:
+        print("Error: the 'classes_to_show' argument must be a string or a list.")
+        return
+
+    target_indices = [CLASS_NAMES.index(cn) for cn in target_classes if cn in CLASS_NAMES]
+    if not target_indices:
+        print(f"Error: class '{classes_to_show}' is invalid or not found in CLASS_NAMES.")
+        return
+
+    print(f"Filtering classes: {[CLASS_NAMES[i] for i in target_indices]}")
+    mask = np.isin(predictions, target_indices)
+    coords = coords[mask]
+    predictions = predictions[mask]
+
+    if coords.shape[0] == 0:
+        print(f"Warning: no points belonging to classes {target_classes} were found in the scene.")
+        return
+    print(f"Point count after filtering: {len(coords)}")
+
+    # 4. Create the Open3D point cloud object
+    pcd = o3d.geometry.PointCloud()
+    pcd.points = o3d.utility.Vector3dVector(coords)
+    pcd.colors = o3d.utility.Vector3dVector(COLOR_MAP[predictions] / 255.0)
+
+    # 5. Save the filtered point cloud
+    if not save_pcd_path:
+        print("Warning: no 'save_pcd_path' provided, not saving a file.")
+        return
+
+    print(f"Saving the filtered point cloud to: {save_pcd_path}")
+    try:
+        o3d.io.write_point_cloud(save_pcd_path, pcd, write_ascii=True)
+        print("Saved successfully.")
+    except Exception as e:
+        print(f"Error: failed to save the point cloud file: {e}")
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(
+        description="输入单个场景文件夹用于结果点云保存",
+        formatter_class=argparse.RawTextHelpFormatter
+    )
+    parser.add_argument(
+        '-i',
+        '--input_folder',
+        type=str,
+        required=True,
+        help='Path to the input scene folder.'
+    )
+    args = parser.parse_args()
+    scene_folder = args.input_folder
+    scenece = os.path.basename(scene_folder)
+
+    coords_file = os.path.join(scene_folder, 'scene/val/process_data/coord.npy')
+    preds_file = os.path.join(scene_folder, 'output/pred.npy')
+
+    output_dir = os.path.join(scene_folder, 'wall_ply')
+    os.makedirs(output_dir, exist_ok=True)
+
+    print(f"\n--- 正在处理场景: {scenece} ---")
+    save_point_cloud_by_class(
+        coords_file=coords_file,
+        preds_file=preds_file,
+        classes_to_show=['wall', 'window', 'door', 'cabinet'],
+        save_pcd_path=f'{output_dir}/wall_instances.ply'
+    )
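As a quick illustration (hypothetical values, relying on the CLASS_NAMES list and numpy import above) of how the class filter works: classes are matched by their index in CLASS_NAMES, so requesting 'wall' and 'door' keeps predictions equal to 12 and 9 respectively:

    target_indices = [CLASS_NAMES.index(cn) for cn in ['wall', 'door']]   # -> [12, 9]
    mask = np.isin(np.array([12, 5, 9, 11]), target_indices)              # -> [True, False, True, False]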

+ 420 - 0
depoly_modify/save_result.py

@@ -0,0 +1,420 @@
+import numpy as np
+import open3d as o3d
+import os
+import time
+import json
+import cv2
+from itertools import groupby
+import argparse
+
+
+# ==============================================================================
+# Helper functions for the optimization algorithm
+# ==============================================================================
+
+def get_class_specific_dbscan_params(class_name):
+    """为不同类别返回定制化的DBSCAN超参数。"""
+    default_eps = 0.25
+    default_min_points = 150
+    params = {
+        'bed': {'eps': 0.23, 'min_points': 100},
+        'sofa': {'eps': 0.3, 'min_points': 300},
+        'table': {'eps': 0.3, 'min_points': 300},
+        'desk': {'eps': 0.3, 'min_points': 300},
+        'bookshelf': {'eps': 0.3, 'min_points': 300},
+        'chair': {'eps': 0.2, 'min_points': 100},
+        'refrigerator': {'eps': 0.25, 'min_points': 200},
+        'cabinet': {'eps': 0.3, 'min_points': 200},
+        'door': {'eps': 0.2, 'min_points': 100}
+    }
+    config = params.get(class_name, {'eps': default_eps, 'min_points': default_min_points})
+    return config['eps'], config['min_points']
+
+
+# ==============================================================================
+# 2D projection and drawing functions
+# ==============================================================================
+
+def is_box_dimension_plausible_2d(box_extent_2d, class_name):
+    """检查2D包围盒的尺寸是否在合理范围内(单位:米)。"""
+    plausible_ranges_2d = {
+        'bed': ([1.2, 0.7], [3.0, 2.8]),  # Significantly relaxed range for beds
+        'sofa': ([1.0, 0.7], [4.0, 1.8]),
+        'table': ([0.5, 0.5], [3.0, 1.5]),
+        'desk': ([0.8, 0.5], [2.5, 1.2]),
+        'bookshelf': ([0.5, 0.2], [2.5, 0.8]),
+        'chair': ([0.3, 0.3], [1.2, 1.2]),
+        'refrigerator': ([0.5, 0.5], [1.2, 1.2]),
+        'cabinet': ([0.4, 0.3], [3.0, 1.0]),
+        'door': ([0.6, 0.05], [1.2, 0.3]),
+        'window': ([0.4, 0.05], [3.0, 0.4])
+    }
+    if class_name not in plausible_ranges_2d:
+        return True
+    min_dims, max_dims = plausible_ranges_2d[class_name]
+    sorted_extent = sorted(box_extent_2d)
+    sorted_min = sorted(min_dims)
+    sorted_max = sorted(max_dims)
+    for i in range(2):
+        if not (sorted_min[i] <= sorted_extent[i] <= sorted_max[i]):
+            return False
+    return True
+
+
+def calculate_2d_iou(box1, box2):
+    """计算两个2D包围盒的IoU。盒子格式为[min_x, min_y, max_x, max_y]"""
+    b1 = box1['bbox_2d_pixels']
+    b2 = box2['bbox_2d_pixels']
+    
+    xA = max(b1[0], b2[0])
+    yA = max(b1[1], b2[1])
+    xB = min(b1[2], b2[2])
+    yB = min(b1[3], b2[3])
+
+    interArea = max(0, xB - xA) * max(0, yB - yA)
+    if interArea == 0:
+        return 0.0
+
+    box1Area = (b1[2] - b1[0]) * (b1[3] - b1[1])
+    box2Area = (b2[2] - b2[0]) * (b2[3] - b2[1])
+    
+    unionArea = float(box1Area + box2Area - interArea)
+    if unionArea == 0:
+        return 0.0
+        
+    return interArea / unionArea
+
+
+def post_process_in_2d(instances_with_pixel_boxes, x_m_per_px, y_m_per_px, iou_threshold=0.5):
+    """在2D像素空间中对实例进行尺寸过滤和非极大值抑制(NMS)。"""
+    # 1. 尺寸过滤
+    plausible_instances = []
+    for inst in instances_with_pixel_boxes:
+        px_box = inst['bbox_2d_pixels']  # [min_x, min_y, max_x, max_y]
+        px_width = px_box[2] - px_box[0]
+        px_height = px_box[3] - px_box[1]
+        
+        metric_width = px_width * x_m_per_px
+        metric_height = px_height * y_m_per_px
+        extent_2d = [metric_width, metric_height]
+        
+        if is_box_dimension_plausible_2d(extent_2d, inst['label']):
+            plausible_instances.append(inst)
+        else:
+             print(f"  - 过滤掉一个2D尺寸异常的 '{inst['label']}' 实例,尺寸: {[f'{x:.2f}' for x in extent_2d]}")
+
+    if not plausible_instances:
+        return []
+
+    # 2. Group by class for post-processing
+    final_instances = []
+    plausible_instances.sort(key=lambda x: x['label'])
+    
+    for class_name, group in groupby(plausible_instances, key=lambda x: x['label']):
+        class_instances = list(group)
+        
+        # --- SPECIAL MERGING LOGIC FOR BEDS ---
+        if class_name == 'bed':
+            if not class_instances:
+                continue
+
+            # Build adjacency matrix for overlapping beds
+            num_instances = len(class_instances)
+            adj_matrix = np.zeros((num_instances, num_instances))
+            for i in range(num_instances):
+                for j in range(i, num_instances):
+                    # Use a low threshold to merge any overlap
+                    if calculate_2d_iou(class_instances[i], class_instances[j]) > 0.05:
+                        adj_matrix[i, j] = 1
+                        adj_matrix[j, i] = 1
+
+            # Find connected components (groups of overlapping boxes)
+            visited = [False] * num_instances
+            groups = []
+            for i in range(num_instances):
+                if not visited[i]:
+                    component = []
+                    q = [i]
+                    visited[i] = True
+                    while q:
+                        u = q.pop(0)
+                        component.append(u)
+                        for v in range(num_instances):
+                            if adj_matrix[u, v] == 1 and not visited[v]:
+                                visited[v] = True
+                                q.append(v)
+                    groups.append(component)
+            
+            # Merge each group into a single instance
+            merged_instances = []
+            for group_indices in groups:
+                instances_in_group = [class_instances[i] for i in group_indices]
+                
+                # Create the merged bounding box
+                min_x = min(inst['bbox_2d_pixels'][0] for inst in instances_in_group)
+                min_y = min(inst['bbox_2d_pixels'][1] for inst in instances_in_group)
+                max_x = max(inst['bbox_2d_pixels'][2] for inst in instances_in_group)
+                max_y = max(inst['bbox_2d_pixels'][3] for inst in instances_in_group)
+                
+                # Aggregate score and find a representative instance for metadata
+                total_score = sum(inst['score'] for inst in instances_in_group)
+                representative_instance = max(instances_in_group, key=lambda x: x['score'])
+                
+                new_instance = representative_instance.copy()
+                new_instance['bbox_2d_pixels'] = [min_x, min_y, max_x, max_y]
+                new_instance['score'] = total_score
+                merged_instances.append(new_instance)
+            
+            final_instances.extend(merged_instances)
+            print(f"  - 类别 'bed': Merged {len(class_instances)} candidates into {len(merged_instances)} final instances.")
+
+        # --- STANDARD NMS FOR OTHER CLASSES ---
+        else:
+            class_instances.sort(key=lambda x: x['score'], reverse=True)
+            kept_instances = []
+            while class_instances:
+                best_inst = class_instances.pop(0)
+                kept_instances.append(best_inst)
+                
+                remaining_instances = []
+                for other_inst in class_instances:
+                    iou = calculate_2d_iou(best_inst, other_inst)
+                    if iou < iou_threshold:
+                        remaining_instances.append(other_inst)
+                    else:
+                        print(f"  - 2D NMS: 抑制一个与更佳实例IoU为 {iou:.2f} 的 '{class_name}' 实例。")
+                class_instances = remaining_instances
+            
+            final_instances.extend(kept_instances)
+            print(f"  - 类别 '{class_name}': 经过2D过滤和NMS后,剩余 {len(kept_instances)} 个有效实例。")
+            
+    return final_instances
+
+
+def build_floor_transform_matrix(j_info: dict, floor_id: int):
+    tab = [[0.0] * 3 for _ in range(3)]
+    res_width = None
+    res_height = None
+
+    for in_json in j_info.get("floors", []):
+        if in_json.get("id") != floor_id:
+            continue
+
+        res_width = in_json.get("resolution", {}).get("width")
+        res_height = in_json.get("resolution", {}).get("height")
+        bound = in_json.get("bound", {})
+        x_min, x_max = bound.get("x_min"), bound.get("x_max")
+        y_min, y_max = bound.get("y_min"), bound.get("y_max")
+
+        tab[0][0] = x_max - x_min
+        tab[0][2] = x_min
+        tab[1][1] = y_min - y_max
+        tab[1][2] = y_max
+        tab[2][2] = 1.0
+        break
+
+    if res_width is None: return np.identity(3).tolist(), None, None
+    tab_array = np.array(tab, dtype=np.float64)
+    if np.linalg.det(tab_array) == 0: raise ValueError("The matrix is singular and cannot be inverted.")
+    return np.linalg.inv(tab_array).tolist(), res_width, res_height
+
+
+def process_and_draw_bboxes(picture_name, floor_path, instances_path, floor_id, output_image_path, output_json_path):
+    try:
+        img = cv2.imread(picture_name)
+        if img is None: raise FileNotFoundError(f"Could not load the background image: {picture_name}")
+        
+        with open(floor_path, 'r', encoding='utf-8') as f: j_info = json.load(f)
+        with open(instances_path, 'r', encoding='utf-8') as f: raw_bbox_data = json.load(f)
+
+        matrix, res_w, res_h = build_floor_transform_matrix(j_info, floor_id)
+        if res_w is None: raise ValueError(f"No floor entry with id {floor_id} found in {floor_path}.")
+        M = np.array(matrix, dtype=np.float64)
+
+        floor_info = next((f for f in j_info.get("floors", []) if f.get("id") == floor_id), None)
+        bound = floor_info.get("bound", {})
+        x_m_per_px = (bound.get("x_max") - bound.get("x_min")) / res_w
+        y_m_per_px = abs(bound.get("y_max") - bound.get("y_min")) / res_h
+
+        instances_with_pixel_boxes = []
+        for item in raw_bbox_data:
+            corners = item.get("corners", [])
+            if len(corners) < 4: continue
+            
+            points_2d = []
+            for i in range(4):
+                norm_pt = M @ np.array([corners[i][0], corners[i][1], 1.0])
+                points_2d.append([int(norm_pt[0] * res_w), int(norm_pt[1] * res_h)])
+            
+            x_coords, y_coords = [p[0] for p in points_2d], [p[1] for p in points_2d]
+            new_item = item.copy()
+            new_item['bbox_2d_pixels'] = [min(x_coords), min(y_coords), max(x_coords), max(y_coords)]
+            instances_with_pixel_boxes.append(new_item)
+
+        print("\n开始在2D空间进行后处理...")
+        filtered_bbox_data = post_process_in_2d(instances_with_pixel_boxes, x_m_per_px, y_m_per_px)
+        print("2D后处理完成。")
+        
+        instances_2d_data = []
+        for item in filtered_bbox_data:
+            min_x, min_y, max_x, max_y = item['bbox_2d_pixels']
+            label = item["label"]
+            color_bgr = (item["color"][2], item["color"][1], item["color"][0])
+
+            instances_2d_data.append({"label": label, "color": item["color"], "bbox_2d": item['bbox_2d_pixels']})
+
+            cv2.rectangle(img, (min_x, min_y), (max_x, max_y), color_bgr, 2)
+            font = cv2.FONT_HERSHEY_SIMPLEX
+            (text_w, text_h), _ = cv2.getTextSize(label, font, 0.5, 1)
+            label_y = min_y - 10 if min_y - 10 > text_h else min_y + text_h + 10
+            cv2.rectangle(img, (min_x, label_y - text_h - 5), (min_x + text_w, label_y + 5), color_bgr, -1)
+            cv2.putText(img, label, (min_x, label_y), font, 0.5, (255, 255, 255), 1, cv2.LINE_AA)
+
+        os.makedirs(os.path.dirname(output_image_path), exist_ok=True)
+        os.makedirs(os.path.dirname(output_json_path), exist_ok=True)
+        cv2.imwrite(output_image_path, img)
+        with open(output_json_path, 'w', encoding='utf-8') as f:
+            json.dump(instances_2d_data, f, indent=4)
+        print(f"\n处理完成!2D结果已保存到: {output_image_path} 和 {output_json_path}")
+        return output_json_path, output_image_path
+
+    except Exception as e:
+        print(f"发生错误: {e}")
+        return None, None
+
+
+# ==============================================================================
+# Main function
+# ==============================================================================
+def visualize_point_cloud_segmentation(coords_file, preds_file, classes_to_show='all',
+                                       classes_to_ignore=None,
+                                       save_pcd_path=None,
+                                       save_3d_json_path=None,
+                                       if_save_ply=False,
+                                       if_save_vision=False):
+    CLASS_NAMES = [
+        'refrigerator', 'desk', 'curtain', 'sofa', 'bookshelf', 'bed',
+        'table', 'window', 'cabinet', 'door', 'chair', 'floor', 'wall',
+        'sink', 'toilet', 'bathtub', 'shower curtain', 'picture', 'counter'
+    ]
+    COLOR_MAP = np.array([
+        [174, 199, 232], [255, 127, 14], [44, 160, 44], [214, 39, 40],
+        [148, 103, 189], [255, 187, 120], [140, 86, 75], [152, 223, 138],
+        [23, 190, 207], [247, 182, 210], [196, 156, 148], [127, 127, 127],
+        [199, 199, 199], [188, 189, 34], [219, 219, 141], [227, 119, 194],
+        [31, 119, 180], [255, 152, 150], [82, 84, 163]
+    ])
+
+    try:
+        coords = np.load(coords_file)
+        predictions = np.load(preds_file)
+    except FileNotFoundError as e:
+        print(f"错误: 找不到文件 {e.filename}。")
+        return None
+    if len(coords) != len(predictions):
+        print("警告: 坐标点数和预测标签数不匹配!")
+        return None
+
+    default_ignore_classes = {'floor', 'wall', 'picture'}
+    ignore_set = default_ignore_classes.union(set(classes_to_ignore or []))
+    show_set = set(classes_to_show) if isinstance(classes_to_show, (list, set)) else None
+
+    final_instances_data, all_instance_points, all_instance_colors = [], [], []
+    print(f"\n通过DBSCAN寻找原始实例...")
+
+    for pred_idx in np.unique(predictions):
+        class_name = CLASS_NAMES[pred_idx]
+        if class_name in ignore_set or (show_set and class_name not in show_set):
+            continue
+
+        dbscan_eps, dbscan_min_points = get_class_specific_dbscan_params(class_name)
+        class_points_indices = np.where(predictions == pred_idx)[0]
+        if len(class_points_indices) < dbscan_min_points: continue
+
+        class_pcd_temp = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(coords[class_points_indices]))
+        instance_labels = np.array(class_pcd_temp.cluster_dbscan(eps=dbscan_eps, min_points=dbscan_min_points, print_progress=False))
+        
+        unique_instances = np.unique(instance_labels[instance_labels != -1])
+        if len(unique_instances) > 0: print(f"- Class '{class_name}': found {len(unique_instances)} raw candidate instances")
+
+        for instance_id in unique_instances:
+            instance_point_indices = np.where(instance_labels == instance_id)[0]
+            if len(instance_point_indices) < dbscan_min_points / 2: continue
+            
+            instance_pcd = class_pcd_temp.select_by_index(instance_point_indices)
+            try:
+                aabb = instance_pcd.get_axis_aligned_bounding_box()
+                points_np = np.asarray(instance_pcd.points)
+                final_instances_data.append({
+                    "label": class_name, "color": COLOR_MAP[pred_idx].tolist(),
+                    "corners": np.asarray(aabb.get_box_points()).tolist(), "score": len(points_np)
+                })
+                all_instance_points.append(points_np)
+                all_instance_colors.append(np.tile(COLOR_MAP[pred_idx] / 255.0, (len(points_np), 1)))
+            except RuntimeError: continue
+
+    print("\n所有原始实例处理完毕。")
+    if save_3d_json_path:
+        os.makedirs(os.path.dirname(save_3d_json_path), exist_ok=True)
+        with open(save_3d_json_path, 'w', encoding='utf-8') as f:
+            json.dump(final_instances_data, f, ensure_ascii=False, indent=4)
+        print(f"原始3D实例JSON信息已保存至: {save_3d_json_path}")
+
+    if if_save_ply and save_pcd_path and all_instance_points:
+        instance_pcd = o3d.geometry.PointCloud()
+        instance_pcd.points = o3d.utility.Vector3dVector(np.vstack(all_instance_points))
+        instance_pcd.colors = o3d.utility.Vector3dVector(np.vstack(all_instance_colors))
+        o3d.io.write_point_cloud(save_pcd_path, instance_pcd)
+        print(f"所有检测到的实例点云已保存至: {save_pcd_path}")
+
+    if if_save_vision:
+        pcd = o3d.geometry.PointCloud(points=o3d.utility.Vector3dVector(coords),
+                                      colors=o3d.utility.Vector3dVector(COLOR_MAP[predictions] / 255.0))
+        o3d.visualization.draw_geometries([pcd], window_name="Raw point cloud", width=1280, height=720)
+
+    return save_3d_json_path
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(
+        description="输入单个场景文件夹用于结果保存",
+        formatter_class=argparse.RawTextHelpFormatter
+    )
+    parser.add_argument(
+        '-i',
+        '--input_folder',
+        type=str,
+        required=True,
+        help='Path to the input scene folder.'
+    )
+    args = parser.parse_args()
+    scene_folder = args.input_folder
+    scenece = os.path.basename(scene_folder)
+
+    coords_file = os.path.join(scene_folder, 'scene/val/process_data/coord.npy')
+    preds_file = os.path.join(scene_folder, "output/pred.npy")
+    floor_plan_image = os.path.join(scene_folder, f"{scenece}.png")
+    scene_info_json = os.path.join(scene_folder, f"{scenece}.json")
+
+    output_dir = os.path.join(scene_folder, 'result_2d_filtered')
+    os.makedirs(output_dir, exist_ok=True)
+    raw_instances3d_json_path = os.path.join(output_dir, 'instances3d_raw.json')
+    final_instances2d_json_path = os.path.join(output_dir, 'instances2d_final.json')
+    instances_ply_path = os.path.join(output_dir, 'instances_raw.ply')
+    segment_onfloor_png_path = os.path.join(output_dir, 'segment_onfloor_final.png')
+
+    saved_3d_json = visualize_point_cloud_segmentation(
+        coords_file=coords_file, preds_file=preds_file,
+        classes_to_ignore=['curtain', 'bookshelf', 'floor', 'wall', 'sink', 'toilet', 'bathtub', 'shower curtain', 'picture'],
+        save_3d_json_path=raw_instances3d_json_path, save_pcd_path=instances_ply_path, if_save_ply=False
+    )
+
+    if saved_3d_json and all(os.path.exists(f) for f in [floor_plan_image, scene_info_json]):
+        print("\n--- 开始进行2D投影和后处理 ---")
+        process_and_draw_bboxes(
+            picture_name=floor_plan_image, floor_path=scene_info_json,
+            instances_path=saved_3d_json, floor_id=0,
+            output_image_path=segment_onfloor_png_path, output_json_path=final_instances2d_json_path
+        )
+    else:
+        print("\nSkipping 2D projection due to missing files.")

+ 90 - 0
depoly_modify/verify_denoise_ply.py

@@ -0,0 +1,90 @@
+import open3d as o3d
+import numpy as np
+import os
+
+
+def reconstruct_and_save_ply(npy_dir, output_ply_path):
+    """
+    Reads coord.npy, color.npy, and normal.npy from a directory,
+    reconstructs a point cloud, and saves it as a .ply file.
+    """
+    print(f"Processing directory: {npy_dir}")
+
+    # Define file paths
+    coord_file = os.path.join(npy_dir, 'coord.npy')
+    color_file = os.path.join(npy_dir, 'color.npy')
+    normal_file = os.path.join(npy_dir, 'normal.npy')
+
+    # Check if coordinate file exists (it's required)
+    if not os.path.exists(coord_file):
+        print(f"  - Error: coord.npy not found in {npy_dir}. Skipping.")
+        return
+
+    # Load coordinates
+    try:
+        coords = np.load(coord_file)
+        print(f"  - Loaded {len(coords)} points from coord.npy.")
+    except Exception as e:
+        print(f"  - Error loading coord.npy: {e}. Skipping.")
+        return
+
+    # Create point cloud object
+    pcd = o3d.geometry.PointCloud()
+    pcd.points = o3d.utility.Vector3dVector(coords)
+
+    # Load colors if available
+    if os.path.exists(color_file):
+        try:
+            colors = np.load(color_file)
+            if len(colors) == len(coords):
+                pcd.colors = o3d.utility.Vector3dVector(colors)
+                print("  - Loaded colors.")
+            else:
+                print("  - Warning: Mismatch between point count and color count. Skipping colors.")
+        except Exception as e:
+            print(f"  - Error loading color.npy: {e}.")
+    else:
+        print("  - info: color.npy not found.")
+
+    # Load normals if available
+    if os.path.exists(normal_file):
+        try:
+            normals = np.load(normal_file)
+            if len(normals) == len(coords):
+                pcd.normals = o3d.utility.Vector3dVector(normals)
+                print("  - Loaded normals.")
+            else:
+                print("  - Warning: Mismatch between point count and normal count. Skipping normals.")
+        except Exception as e:
+            print(f"  - Error loading normal.npy: {e}.")
+    else:
+        print("  - info: normal.npy not found.")
+
+    # Save the reconstructed point cloud
+    try:
+        o3d.io.write_point_cloud(output_ply_path, pcd, write_ascii=True)
+        print(f"  - Successfully saved reconstructed point cloud to: {output_ply_path}")
+    except Exception as e:
+        print(f"  - Error saving .ply file: {e}")
+
+
+if __name__ == '__main__':
+    scenes_folder = "/media/gu/d54b9541-2b55-4c75-b059-3006d51983d53/lqc/Downloads/Scenes"
+    scenes = os.listdir(scenes_folder)
+    for scene_name in scenes:
+        print(f"\nVerifying scene: {scene_name}...")
+        scene_folder = os.path.join(scenes_folder, scene_name)
+        npy_folder = os.path.join(scene_folder, "scene/val/process_data")
+
+        # Directory for reconstructed/denoised outputs (created if missing)
+        process_folder = os.path.join(scene_folder, 'denoise_ply')
+        if not os.path.exists(process_folder):
+            os.makedirs(process_folder)
+
+        # Path for the output .ply file
+        output_ply = os.path.join(scene_folder, 'verified_cut_result.ply')
+
+        if os.path.isdir(npy_folder):
+            reconstruct_and_save_ply(npy_folder, output_ply)
+        else:
+            print(f"  - Process data folder not found: {npy_folder}. Skipping scene.")

+ 158 - 0
depoly_modify/wall_density_generate.py

@@ -0,0 +1,158 @@
+import os
+import numpy as np
+import json
+import cv2
+import open3d as o3d
+import argparse
+
+def build_floor_transform_matrix(j_info: dict, floor_id: int):
+    tab = [[0.0] * 3 for _ in range(3)]
+    res_width = None
+    res_height = None
+    for in_json in j_info.get("floors", []):
+        floor_id_in_json = in_json.get("id")
+        if floor_id_in_json != floor_id:
+            continue
+        res_width = in_json.get("resolution", {}).get("width")
+        res_height = in_json.get("resolution", {}).get("height")
+
+        x_min = in_json.get("bound", {}).get("x_min")
+        x_max = in_json.get("bound", {}).get("x_max")
+        y_min = in_json.get("bound", {}).get("y_min")
+        y_max = in_json.get("bound", {}).get("y_max")
+
+        # This matrix maps normalized [0, 1] space to world coordinates
+        tab[0][0] = x_max - x_min
+        tab[0][1] = 0.0
+        tab[0][2] = x_min
+
+        tab[1][0] = 0.0
+        tab[1][1] = y_min - y_max
+        tab[1][2] = y_max
+
+        tab[2][0] = 0.0
+        tab[2][1] = 0.0
+        tab[2][2] = 1.0
+
+        break
+
+    if res_width is None:
+        return np.identity(3).tolist(), None, None
+    tab_array = np.array(tab, dtype=np.float64)
+    if np.linalg.det(tab_array) == 0:
+        raise ValueError("矩阵是奇异的,无法求逆。")
+    tab_inverse_array = np.linalg.inv(tab_array)
+    tab_inverse = tab_inverse_array.tolist()
+    return tab_inverse, res_width, res_height
+
+
+def generate_maps(ply_path, floor_json_path, floor_id, output_mask_path, output_density_path, output_binary_path):
+    """
+    Generates multiple maps from a .ply file: a raw projection, a density map,
+    and a manually binarized map.
+
+    Args:
+        ply_path (str): Path to the input PLY file.
+        floor_json_path (str): Path to the floor JSON file.
+        floor_id (int): The ID of the floor to process.
+        output_mask_path (str): Path to save the raw projection mask.
+        output_density_path (str): Path to save the density map.
+        output_binary_path (str): Path to save the manually binarized map.
+    """
+    try:
+        with open(floor_json_path, 'r') as f:
+            j_info = json.load(f)
+    except FileNotFoundError:
+        print(f"JSON file not found at {floor_json_path}")
+        return
+    except json.JSONDecodeError:
+        print(f"Error decoding JSON from {floor_json_path}")
+        return
+
+    matrix, res_w, res_h = build_floor_transform_matrix(j_info, floor_id)
+    if res_w is None or res_h is None:
+        print(f"Could not find floor_id {floor_id} in {os.path.basename(floor_json_path)}")
+        return
+
+    try:
+        pcd = o3d.io.read_point_cloud(ply_path)
+        if not pcd.has_points():
+            print(f"Warning: Point cloud is empty in {ply_path}")
+            return
+        
+        cl, ind = pcd.remove_statistical_outlier(nb_neighbors=20, std_ratio=2.0)
+        denoised_pcd = pcd.select_by_index(ind)
+        points = np.asarray(denoised_pcd.points)[:, :2]
+    except Exception as e:
+        print(f"Failed to read or process point cloud file {ply_path}: {e}")
+        return
+
+    mask_image = np.zeros((res_h, res_w), dtype=np.uint8)
+    density_accumulator = np.zeros((res_h, res_w), dtype=np.float32)
+    matrix = np.array(matrix, dtype=np.float64)
+
+    for world_point in points:
+        homogeneous_point = np.array([world_point[0], world_point[1], 1.0], dtype=np.float64)
+        normalized_point = matrix @ homogeneous_point
+        pixel_x = int(normalized_point[0] * res_w)
+        pixel_y = int(normalized_point[1] * res_h)
+        if 0 <= pixel_x < res_w and 0 <= pixel_y < res_h:
+            mask_image[pixel_y, pixel_x] = 255
+            density_accumulator[pixel_y, pixel_x] += 1
+
+    output_dir = os.path.dirname(output_mask_path)
+    if output_dir and not os.path.exists(output_dir):
+        os.makedirs(output_dir)
+
+    # Save raw projection mask
+    # cv2.imwrite(output_mask_path, mask_image)
+    # print(f"Successfully generated and saved wall mask to {output_mask_path}")
+
+    # Save grayscale density map
+    density_map_grayscale = cv2.normalize(density_accumulator, None, 0, 255, cv2.NORM_MINMAX, dtype=cv2.CV_8U)
+    # cv2.imwrite(output_density_path, density_map_grayscale)
+    # print(f"Successfully generated and saved density map to {output_density_path}")
+
+    # Binarize the grayscale density map using a manual threshold.
+    # This threshold value (0-255) can be tuned to change sensitivity.
+    manual_threshold = 20 #10
+    _, binary_map = cv2.threshold(density_map_grayscale, manual_threshold, 255, cv2.THRESH_BINARY)
+    
+    print(f"Binarizing {os.path.basename(output_binary_path)} with manual threshold: {manual_threshold}")
+    
+    # Save the manually binarized map.
+    cv2.imwrite(output_binary_path, binary_map)
+    print(f"Successfully generated and saved binarized map to {output_binary_path}")
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(
+        description="输入单个场景文件夹用于墙壁点云概率密度图的生成",
+        formatter_class=argparse.RawTextHelpFormatter
+    )
+    parser.add_argument(
+        '-i',
+        '--input_folder',
+        type=str,
+        required=True,
+        help='Path to the input scene folder.'
+    )
+    args = parser.parse_args()
+    scene_folder = args.input_folder
+    scenece = os.path.basename(scene_folder)
+
+    floor_id = 0
+
+    ply_path = os.path.join(scene_folder, "wall_ply/wall_instances.ply")
+
+    output_dir = os.path.join(scene_folder, "output_maps_walldensity")
+    if not os.path.exists(output_dir):
+        os.makedirs(output_dir)
+
+    floor_json_path = os.path.join(scene_folder, f"{scenece}.json")
+
+    output_mask_path = os.path.join(output_dir, f"{scenece}_floor_{floor_id}_mask.png")
+    output_density_path = os.path.join(output_dir, f"{scenece}_floor_{floor_id}_density.png")
+    output_binary_path = os.path.join(output_dir, f"{scenece}_floor_{floor_id}_binary.png")
+
+    generate_maps(ply_path, floor_json_path, floor_id, output_mask_path, output_density_path, output_binary_path)
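As a small illustration (made-up per-pixel counts, not part of the commit) of the normalize-and-threshold step in generate_maps; with the same manual_threshold of 20, only sufficiently dense pixels survive in the binary map:

    import numpy as np
    import cv2
    acc = np.array([[0, 2], [5, 50]], dtype=np.float32)           # hypothetical per-pixel point counts
    gray = cv2.normalize(acc, None, 0, 255, cv2.NORM_MINMAX, dtype=cv2.CV_8U)
    _, binary = cv2.threshold(gray, 20, 255, cv2.THRESH_BINARY)   # same manual_threshold as generate_maps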