41 Commits

Author SHA1 Message Date
  Emin 614a91b4d1 !76 merge into oscc/master 3 months ago
  0xharry 0d642cf571 fix(tapcell): add null check for core site in tapCells function 3 months ago
  0xharry 6f8f7cf064 refactor: update .dockerignore to include files/dirs 3 months ago
  0xharry d237c32f8f refactor: remove redundant CMAKE_BUILD_TYPE debug setting from CMakeLists.txt files 3 months ago
  simintao 7e7d7e4f0a fix:use output cap 3 months ago
  simintao 245f684e11 Merge branch 'master' of gitee.com:ieda-ipd/iEDA 3 months ago
  simintao dc3e0bbf0f feature:support dump power graph json 3 months ago
  Emin afb077b6ac !84 fix(ipl): add target_link_libraries indentation for ENABLE_AI condition 3 months ago
  Yell-walkalone be20ad22a4 Merge branch 'master' of gitee.com:ieda-ipd/iEDA 3 months ago
  simintao bd8cdb6774 refactor:remove extra space 3 months ago
  simintao b92593c804 Merge branch 'master' of gitee.com:ieda-ipd/iEDA 3 months ago
  simintao bf52d932ad fix:mask graph dump 3 months ago
  Emin a3dae1d6fa !83 fix(log): add fallback for missing glog-config.cmake on ubuntu22 3 months ago
  Yell-walkalone dbec12acb9 Merge branch 'master' of gitee.com:ieda-ipd/iEDA 3 months ago
  simintao 78444c633d fix:add glog print 3 months ago
  simintao 05f2b4db66 Merge branch 'master' of gitee.com:ieda-ipd/iEDA 3 months ago
  simintao 4a6e72308a refactor:top module log 3 months ago
  YihangQiu 30a0e3ae76 feat: add "batch_mode" parameter to control net/patch.json vectorization output structure. 3 months ago
  zxy fe725a0920 update Verilog writer 3 months ago
  Yell-walkalone 1613425c37 Merge branch 'master' of gitee.com:ieda-ipd/iEDA 3 months ago
  Yell-walkalone c891c27046 build ai module & refactor wire length predictor by ai method 3 months ago
  simintao 13ed3f0a03 Merge branch 'master' of gitee.com:ieda-ipd/iEDA 3 months ago
  simintao 16cf4a58f0 fix:assign output write 3 months ago
  Emin fc650e0c2a !82 fix(log): add glog version handling in CMake and Log implementation 3 months ago
  simintao e8a8d367bb Merge branch 'master' of gitee.com:ieda-ipd/iEDA 3 months ago
  simintao ebbbd42df7 fix:merge two net 3 months ago
  Yell-walkalone 67e88f18b1 change AI flag definition 3 months ago
  Yell-walkalone bec7ec74fb fix bugs for log 3 months ago
  Yell-walkalone afdc2161c4 Merge branch 'master' of gitee.com:ieda-ipd/iEDA 3 months ago
  Yell-walkalone bf2e3616dd merge & fix bug for log 3 months ago
  Yell-walkalone 395eea445f ignore illegal layer for net patterns 3 months ago
  ZhishengZeng 3f6a62d023 !81 update func 3 months ago
  ZhishengZeng b12a445aa9 update func 3 months ago
  ZhishengZeng 89e17cf64d Merge branch 'master' of gitee.com:ieda-ipd/iEDA into nn_master 3 months ago
  ZhishengZeng 777fe340c8 update track 3 months ago
  simintao 3bc3316ef4 Merge branch 'master' of gitee.com:ieda-ipd/iEDA 3 months ago
  simintao 3f05d4ba0b fix:input/output direction dump 3 months ago
  ZhishengZeng 3179de2f5c Merge branch 'master' of gitee.com:ieda-ipd/iEDA into nn_master 3 months ago
  zs z db68fa63f7 update log 3 months ago
  zs z d4a4159ccc Merge branch 'master' of gitee.com:ieda-ipd/iEDA into nn_master 3 months ago
  zs z 999e026fb6 update 3 months ago
72 changed files with 1240 additions and 916 deletions
  1. +13 -10 .dockerignore
  2. +6 -1 CMakeLists.txt
  3. +1 -0 src/ai/CMakeLists.txt
  4. +1 -0 src/ai/predictor/CMakeLists.txt
  5. +9 -4 src/ai/predictor/ipl_wirelength/CMakeLists.txt
  6. +17 -5 src/ai/predictor/ipl_wirelength/ai_wirelength.cc
  7. +24 -26 src/ai/predictor/ipl_wirelength/ai_wirelength.hh
  8. +152 -0 src/ai/predictor/ipl_wirelength/normalization_handler.cc
  9. +0 -0 src/ai/predictor/ipl_wirelength/normalization_handler.hh
  10. +213 -0 src/ai/predictor/ipl_wirelength/onnx_model_handler.cc
  11. +0 -0 src/ai/predictor/ipl_wirelength/onnx_model_handler.hh
  12. +148 -0 src/ai/predictor/ipl_wirelength/wirelength_predictor.cc
  13. +0 -0 src/ai/predictor/ipl_wirelength/wirelength_predictor.hh
  14. +6 -11 src/database/manager/builder/verilog_builder/verilog_read.cpp
  15. +45 -45 src/database/manager/builder/verilog_builder/verilog_write.cpp
  16. +1 -1 src/database/manager/parser/verilog/verilog-rust/verilog-parser/src/verilog_parser/mod.rs
  17. +1 -1 src/interface/python/py_vec/py_register_vec.h
  18. +2 -2 src/interface/python/py_vec/py_vec.cpp
  19. +1 -1 src/interface/python/py_vec/py_vec.h
  20. +2 -2 src/operation/iDRC/interface/DRCInterface.cpp
  21. +4 -0 src/operation/iFP/source/module/tap_cell/tapcell.cpp
  22. +8 -0 src/operation/iPA/source/module/core/PwrArc.hh
  23. +8 -0 src/operation/iPA/source/module/core/PwrVertex.hh
  24. +27 -13 src/operation/iPA/source/module/ops/calc_power/PwrCalcInternalPower.cc
  25. +183 -0 src/operation/iPA/source/module/ops/dump/PwrDumpGraph.cc
  26. +31 -7 src/operation/iPA/source/module/ops/dump/PwrDumpGraph.hh
  27. +9 -15 src/operation/iPL/api/PLAPI.cc
  28. +2 -2 src/operation/iPL/api/PLAPI.hh
  29. +0 -8 src/operation/iPL/source/module/CMakeLists.txt
  30. +0 -13 src/operation/iPL/source/module/ai_predictor/CMakeLists.txt
  31. +0 -143 src/operation/iPL/source/module/ai_predictor/wirelength/normalization_handler.cc
  32. +0 -210 src/operation/iPL/source/module/ai_predictor/wirelength/onnx_model_handler.cc
  33. +0 -146 src/operation/iPL/source/module/ai_predictor/wirelength/wirelength_predictor.cc
  34. +7 -0 src/operation/iPL/source/module/detail_placer/CMakeLists.txt
  35. +17 -56 src/operation/iPL/source/module/detail_placer/DetailPlacer.cc
  36. +5 -9 src/operation/iPL/source/module/detail_placer/DetailPlacer.hh
  37. +0 -19 src/operation/iPL/source/module/evaluator/wirelength/CMakeLists.txt
  38. +1 -2 src/operation/iPL/source/module/evaluator/wirelength/Wirelength.hh
  39. +5 -1 src/operation/iPL/source/module/topology_manager/CMakeLists.txt
  40. +1 -1 src/operation/iPL/source/module/topology_manager/TopologyManager.hh
  41. +0 -2 src/operation/iPNP/source/config/CMakeLists.txt
  42. +0 -2 src/operation/iPNP/source/module/synthesis/CMakeLists.txt
  43. +4 -2 src/operation/iRT/interface/RTInterface.cpp
  44. +42 -9 src/operation/iRT/source/data_manager/DataManager.cpp
  45. +2 -1 src/operation/iRT/source/data_manager/DataManager.hpp
  46. +2 -0 src/operation/iRT/source/module/detailed_router/DetailedRouter.cpp
  47. +3 -0 src/operation/iRT/source/module/layer_assigner/LayerAssigner.cpp
  48. +2 -0 src/operation/iRT/source/module/pin_accessor/PinAccessor.cpp
  49. +3 -0 src/operation/iRT/source/module/space_router/SpaceRouter.cpp
  50. +2 -0 src/operation/iRT/source/module/supply_analyzer/SupplyAnalyzer.cpp
  51. +3 -0 src/operation/iRT/source/module/topology_generator/TopologyGenerator.cpp
  52. +2 -0 src/operation/iRT/source/module/track_assigner/TrackAssigner.cpp
  53. +2 -0 src/operation/iRT/source/module/violation_reporter/ViolationReporter.cpp
  54. +3 -1 src/operation/iSTA/source/module/netlist/NetlistWriter.cc
  55. +1 -1 src/operation/iSTA/source/module/netlist/Port.hh
  56. +0 -2 src/operation/iSTA/source/module/sta/CMakeLists.txt
  57. +6 -9 src/operation/iSTA/source/module/sta/Sta.cc
  58. +7 -6 src/operation/iSTA/source/module/sta/StaDelayPropagation.cc
  59. +3 -3 src/operation/iSTA/source/module/sta/StaDump.cc
  60. +13 -14 src/operation/iSTA/source/module/sta/StaDump.hh
  61. +32 -0 src/utility/log/CMakeLists.txt
  62. +14 -1 src/utility/log/Log.cc
  63. +3 -5 src/utility/memory/MemoryMonitor.cc
  64. +2 -12 src/utility/memory/MemoryMonitor.hh
  65. +2 -2 src/vectorization/api/vec_api.cpp
  66. +1 -1 src/vectorization/api/vec_api.h
  67. +2 -2 src/vectorization/src/data_manager/vec_dm.cpp
  68. +1 -1 src/vectorization/src/data_manager/vec_dm.h
  69. +127 -80 src/vectorization/src/data_manager/vec_file.cpp
  70. +3 -3 src/vectorization/src/data_manager/vec_file.h
  71. +2 -2 src/vectorization/src/vectorization.cpp
  72. +1 -1 src/vectorization/src/vectorization.h

+13 -10 .dockerignore

@@ -1,10 +1,13 @@
.git
.gitee
.github
.vscode
build
docs
bin
scripts/foundry/sky130
scripts/design/sky130_gcd
**/target/
*
!cmake/
!scripts/hello.tcl
!scripts/design/ihp130_gcd/
!scripts/foundry/ihp130/ihp-sg13g2/libs.ref/
!src/
!build.sh
!CMakeLists.txt
!Dockerfile
!README.md
!README-CN.md
!LICENSE
!.git*

+6 -1 CMakeLists.txt

@@ -39,7 +39,7 @@ option(USE_PROFILER "Enable performance profiling (default OFF)" OFF)
option(SANITIZER "Enable address sanitizer (default OFF)" OFF)
option(BUILD_GUI "Enable GUI components (default OFF)" OFF)
option(USE_GPU "Enable GPU acceleration (default OFF)" OFF)
option(BUILD_AI_PREDICTOR "Enable AI predictor modules (default OFF)" OFF)
option(ENABLE_AI "Enable AI modules (default OFF)" OFF)
option(COMPATIBILITY_MODE "Enable compatibility mode (disable aggressive optimizations)" ON)

# Define GLOG_USE_GLOG_EXPORT for glog 0.7.1+ compatibility
@@ -116,6 +116,11 @@ include(cmake/operation/ista.cmake)
include(cmake/operation/ipa.cmake)
include(cmake/rust.cmake)

if(ENABLE_AI)
add_definitions(-DENABLE_AI)
add_subdirectory(src/ai)
endif()

add_subdirectory(src/third_party)
add_subdirectory(src/utility)
add_subdirectory(src/apps)


+1 -0 src/ai/CMakeLists.txt

@@ -0,0 +1 @@
add_subdirectory(predictor)

+1 -0 src/ai/predictor/CMakeLists.txt

@@ -0,0 +1 @@
add_subdirectory(ipl_wirelength)

src/operation/iPL/source/module/ai_predictor/wirelength/CMakeLists.txt → src/ai/predictor/ipl_wirelength/CMakeLists.txt

@@ -1,17 +1,22 @@
add_library(ipl_module_ai_wirelength
add_library(ipl_predictor_wirelen
ai_wirelength.cc
wirelength_predictor.cc
onnx_model_handler.cc
normalization_handler.cc
)

target_link_libraries(ipl_module_ai_wirelength
target_link_libraries(ipl_predictor_wirelen
PUBLIC
${HOME_THIRDPARTY}/onnxruntime/libonnxruntime.so
log
ipl_module_evaluator_wirelength
ipl-module-topology_manager
)

target_include_directories(ipl_module_ai_wirelength
target_include_directories(ipl_predictor_wirelen
PUBLIC
${CMAKE_CURRENT_SOURCE_DIR}
${HOME_THIRDPARTY}/onnxruntime/include
${HOME_THIRDPARTY}/json
)
${HOME_OPERATION}/iPL/source/data
)

src/operation/iPL/source/module/evaluator/wirelength/AIWirelength.cc → src/ai/predictor/ipl_wirelength/ai_wirelength.cc

@@ -16,17 +16,29 @@
// ***************************************************************************************
/*
* @Description: AI-based wirelength evaluator implementation
* @FilePath: /iEDA/src/iPL/src/evaluator/wirelength/AIWirelength.cc
* @FilePath: /iEDA/src/ai/predictor/ipl_wirelength/ai_wirelength.cc
*/

#include "AIWirelength.hh"
#include "Log.hh"
#include "data/Point.hh"
#include "data/Rectangle.hh"
#include "ai_wirelength.hh"

#include <vector>

#include "Log.hh"
#include "Point.hh"
#include "Rectangle.hh"
#include "TopologyManager.hh"

namespace ipl {

AIWirelength* AIWirelength::_instance = nullptr;

bool AIWirelength::init(const std::string& model_path, const std::string& params_path, TopologyManager* topology_manager)
{
_topology_manager = topology_manager;

return loadModel(model_path) && loadNormalizationParams(params_path);
}

bool AIWirelength::loadModel(const std::string& model_path)
{
if (_predictor->loadModel(model_path)) {

src/operation/iPL/source/module/evaluator/wirelength/AIWirelength.hh → src/ai/predictor/ipl_wirelength/ai_wirelength.hh

@@ -16,62 +16,60 @@
// ***************************************************************************************
/*
* @Description: AI-based wirelength evaluator
* @FilePath: /iEDA/src/iPL/src/evaluator/wirelength/AIWirelength.hh
* @FilePath: /iEDA/src/ai/predictor/ipl_wirelength/ai_wirelength.hh
*/

#ifndef IPL_EVALUATOR_AI_WIRELENGTH_H
#define IPL_EVALUATOR_AI_WIRELENGTH_H

#include "Wirelength.hh"

#include <memory>

#include "Wirelength.hh"
#include "wirelength_predictor.hh"

#define aiPLWireLengthInst ipl::AIWirelength::getInstance()

namespace ipl {

class AIWirelength : public Wirelength
class TopologyManager;

class AIWirelength
{
public:
AIWirelength() = delete;
explicit AIWirelength(TopologyManager* topology_manager);
AIWirelength(const AIWirelength&) = delete;
AIWirelength(AIWirelength&&) = delete;
~AIWirelength() override = default;
static AIWirelength* getInstance()
{
if (!_instance) {
_instance = new AIWirelength;
}
return _instance;
}

AIWirelength& operator=(const AIWirelength&) = delete;
AIWirelength& operator=(AIWirelength&&) = delete;
bool init(const std::string& model_path, const std::string& params_path, TopologyManager* topology_manager);

// Load ONNX model for wirelength prediction
bool loadModel(const std::string& model_path);
bool loadNormalizationParams(const std::string& params_path);

// Check if model is loaded
bool isModelLoaded() const;
bool isModelLoaded() const { return _is_model_loaded; }

// Override virtual methods from Wirelength base class
int64_t obtainTotalWirelength() override;
int64_t obtainNetWirelength(int32_t net_id) override;
int64_t obtainPartOfNetWirelength(int32_t net_id, int32_t sink_pin_id) override;
int64_t obtainTotalWirelength();
int64_t obtainNetWirelength(int32_t net_id);
int64_t obtainPartOfNetWirelength(int32_t net_id, int32_t sink_pin_id);

// Extract features for a net
std::vector<float> extractNetFeatures(int32_t net_id);

private:
std::unique_ptr<WirelengthPredictor> _predictor;
static AIWirelength* _instance;
TopologyManager* _topology_manager = nullptr;
std::unique_ptr<WirelengthPredictor> _predictor = std::make_unique<WirelengthPredictor>();
bool _is_model_loaded = false;
};

inline AIWirelength::AIWirelength(TopologyManager* topology_manager) : Wirelength(topology_manager),
_predictor(std::make_unique<WirelengthPredictor>())
{
}

inline bool AIWirelength::isModelLoaded() const
{
return _is_model_loaded;
}
AIWirelength() = default;
~AIWirelength() = default;
};

} // namespace ipl


+152 -0 src/ai/predictor/ipl_wirelength/normalization_handler.cc

@@ -0,0 +1,152 @@
// ***************************************************************************************
// Copyright (c) 2023-2025 Peng Cheng Laboratory
// Copyright (c) 2023-2025 Institute of Computing Technology, Chinese Academy of Sciences
// Copyright (c) 2023-2025 Beijing Institute of Open Source Chip
//
// iEDA is licensed under Mulan PSL v2.
// You can use this software according to the terms and conditions of the Mulan PSL v2.
// You may obtain a copy of Mulan PSL v2 at:
// http://license.coscl.org.cn/MulanPSL2
//
// THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
// EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
// MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
//
// See the Mulan PSL v2 for more details.
// ***************************************************************************************

#include "normalization_handler.hh"

#include <algorithm>
#include <cmath>
#include <fstream>
#include <iostream>
#include <sstream>

#include "Log.hh"
#include "json.hpp"

namespace ipl {

NormalizationHandler::NormalizationHandler() : _is_loaded(false)
{
}

NormalizationHandler::~NormalizationHandler()
{
}

bool NormalizationHandler::loadMinMaxParams(const std::string& params_path)
{
return _parseJsonParams(params_path);
}

void NormalizationHandler::setMinMaxParams(const std::vector<float>& data_min, const std::vector<float>& data_max,
const std::vector<std::string>& feature_names)
{
if (data_min.size() != data_max.size()) {
LOG_WARNING << "Error: data_min and data_max must have same size";
return;
}

_data_min = data_min;
_data_max = data_max;
_feature_names = feature_names;
_is_loaded = true;
}

std::vector<float> NormalizationHandler::normalize(const std::vector<float>& features) const
{
if (!_is_loaded) {
LOG_WARNING << "Error: Normalization parameters not loaded";
return features;
}

if (features.size() != _data_min.size()) {
LOG_WARNING << "Error: Feature size mismatch. Expected " << _data_min.size() << ", got " << features.size();
return features;
}

std::vector<float> normalized_features;
normalized_features.reserve(features.size());

for (size_t i = 0; i < features.size(); ++i) {
float range = _data_max[i] - _data_min[i];
if (range == 0.0f) {
// if max == min, normalized = 0
normalized_features.push_back(0.0f);
} else {
// MinMax normalization: (x - min) / (max - min)
float normalized = (features[i] - _data_min[i]) / range;

normalized = std::max(0.0f, std::min(1.0f, normalized));
normalized_features.push_back(normalized);
}
}

return normalized_features;
}

bool NormalizationHandler::isReady() const
{
return _is_loaded;
}

std::vector<std::string> NormalizationHandler::getFeatureNames() const
{
return _feature_names;
}

size_t NormalizationHandler::getFeatureCount() const
{
return _data_min.size();
}

bool NormalizationHandler::_parseJsonParams(const std::string& params_path)
{
std::ifstream file(params_path);
if (!file.is_open()) {
LOG_WARNING << "Error: Cannot open normalization parameters file: " << params_path;
return false;
}

try {
nlohmann::json j;
file >> j;

// parse data
if (j.contains("data_min") && j.contains("data_max")) {
_data_min = j["data_min"].get<std::vector<float>>();
_data_max = j["data_max"].get<std::vector<float>>();

if (j.contains("feature_names")) {
_feature_names = j["feature_names"].get<std::vector<std::string>>();
}

if (_data_min.size() != _data_max.size() || _data_min.empty()) {
LOG_WARNING << "Error: Invalid normalization parameters - size mismatch";
return false;
}

_is_loaded = true;

LOG_INFO << "Successfully loaded normalization parameters:";
LOG_INFO << " Features: " << _data_min.size();
LOG_INFO << " Feature names: ";
for (const auto& name : _feature_names) {
LOG_INFO << name << " ";
}

return true;
} else {
LOG_WARNING << "Error: Missing required fields in JSON";
return false;
}

} catch (const std::exception& e) {
LOG_WARNING << "Error parsing JSON: " << e.what();
return false;
}
}

} // namespace ipl

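For reference, a minimal standalone sketch (not part of this diff) of the MinMax rule NormalizationHandler::normalize applies above: (x - min) / (max - min), clamped to [0, 1], with degenerate features (max == min) mapped to 0. The function name is illustrative only.

#include <algorithm>
#include <cstddef>
#include <vector>

// Illustrative only: mirrors the normalize() loop in the diff above.
std::vector<float> min_max_normalize(const std::vector<float>& x, const std::vector<float>& data_min,
                                     const std::vector<float>& data_max)
{
  std::vector<float> out;
  out.reserve(x.size());
  for (std::size_t i = 0; i < x.size(); ++i) {
    float range = data_max[i] - data_min[i];
    // max == min would divide by zero, so such features normalize to 0.
    out.push_back(range == 0.0f ? 0.0f : std::clamp((x[i] - data_min[i]) / range, 0.0f, 1.0f));
  }
  return out;
}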
src/operation/iPL/source/module/ai_predictor/wirelength/normalization_handler.hh → src/ai/predictor/ipl_wirelength/normalization_handler.hh


+213 -0 src/ai/predictor/ipl_wirelength/onnx_model_handler.cc

@@ -0,0 +1,213 @@
// ***************************************************************************************
// Copyright (c) 2023-2025 Peng Cheng Laboratory
// Copyright (c) 2023-2025 Institute of Computing Technology, Chinese Academy of Sciences
// Copyright (c) 2023-2025 Beijing Institute of Open Source Chip
//
// iEDA is licensed under Mulan PSL v2.
// You can use this software according to the terms and conditions of the Mulan PSL v2.
// You may obtain a copy of Mulan PSL v2 at:
// http://license.coscl.org.cn/MulanPSL2
//
// THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
// EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
// MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
//
// See the Mulan PSL v2 for more details.
// ***************************************************************************************
#include "onnx_model_handler.hh"

#include <iostream>

#include "Log.hh"

namespace ipl {

ONNXModelHandler::ONNXModelHandler() : _env(ORT_LOGGING_LEVEL_WARNING, "ONNXModelHandler")
{
// Initialize ONNX Runtime environment
_session_options.SetIntraOpNumThreads(1);
_session_options.SetInterOpNumThreads(1);
_session_options.SetGraphOptimizationLevel(GraphOptimizationLevel::ORT_ENABLE_BASIC);
}

ONNXModelHandler::~ONNXModelHandler()
{
// Clean up - smart pointers handle this automatically
}

bool ONNXModelHandler::loadModel(const std::string& model_path)
{
try {
// Create session from model file
_session = std::make_unique<Ort::Session>(_env, model_path.c_str(), _session_options);

// Get allocator
Ort::AllocatorWithDefaultOptions allocator;

// Get input information
size_t num_input_nodes = _session->GetInputCount();
if (num_input_nodes == 0) {
LOG_WARNING << "Model has no input nodes";
return false;
}

// Get input names and shapes
_input_names.clear();
_input_shapes.clear();

for (size_t i = 0; i < num_input_nodes; i++) {
// Get input name using the correct API
Ort::AllocatedStringPtr input_name_ptr = _session->GetInputNameAllocated(i, allocator);
_input_names.push_back(std::string(input_name_ptr.get()));

// Get input type info
Ort::TypeInfo input_type_info = _session->GetInputTypeInfo(i);
auto input_tensor_info = input_type_info.GetTensorTypeAndShapeInfo();

// Get input shape
std::vector<int64_t> input_shape = input_tensor_info.GetShape();
_input_shapes.push_back(input_shape);
}

// Get output information
size_t num_output_nodes = _session->GetOutputCount();
if (num_output_nodes == 0) {
LOG_WARNING << "Model has no output nodes";
return false;
}

// Get output names and shapes
_output_names.clear();
_output_shapes.clear();

for (size_t i = 0; i < num_output_nodes; i++) {
// Get output name
Ort::AllocatedStringPtr output_name_ptr = _session->GetOutputNameAllocated(i, allocator);
_output_names.push_back(std::string(output_name_ptr.get()));

// Get output type info
Ort::TypeInfo output_type_info = _session->GetOutputTypeInfo(i);
auto output_tensor_info = output_type_info.GetTensorTypeAndShapeInfo();

// Get output shape
std::vector<int64_t> output_shape = output_tensor_info.GetShape();
_output_shapes.push_back(output_shape);
}

// Validate shapes for our use case
if (_input_shapes[0].size() != 2) {
LOG_WARNING << "Unexpected input shape dimension: " << _input_shapes[0].size();
return false;
}

if (_output_shapes[0].size() != 2) {
LOG_WARNING << "Unexpected output shape dimension: " << _output_shapes[0].size();
return false;
}

// Set feature counts (assuming batch dimension is dynamic or 1)
_input_feature_count = static_cast<int>(_input_shapes[0][1]);
_output_feature_count = static_cast<int>(_output_shapes[0][1]);

LOG_INFO << "Successfully loaded ONNX model from " << model_path;
LOG_INFO << "Input name: " << _input_names[0];
LOG_INFO << "Output name: " << _output_names[0];
LOG_INFO << "Input feature count: " << _input_feature_count;
LOG_INFO << "Output feature count: " << _output_feature_count;

return true;
} catch (const Ort::Exception& e) {
LOG_WARNING << "ONNX exception: " << e.what();
return false;
} catch (const std::exception& e) {
LOG_WARNING << "Exception: " << e.what();
return false;
}
}

std::vector<float> ONNXModelHandler::predict(const std::vector<float>& input)
{
if (!_session) {
LOG_WARNING << "Model not loaded";
return {};
}

if (input.size() != static_cast<size_t>(_input_feature_count)) {
LOG_WARNING << "Input feature count mismatch: expected " << _input_feature_count << ", got " << input.size();
return {};
}

try {
// Create input tensor
const std::vector<int64_t> input_shape = {1, _input_feature_count}; // Batch size 1

Ort::MemoryInfo memory_info = Ort::MemoryInfo::CreateCpu(OrtAllocatorType::OrtArenaAllocator, OrtMemType::OrtMemTypeDefault);

Ort::Value input_tensor = Ort::Value::CreateTensor<float>(memory_info, const_cast<float*>(input.data()), input.size(),
input_shape.data(), input_shape.size());

if (!input_tensor.IsTensor()) {
LOG_WARNING << "Failed to create input tensor";
return {};
}

// Prepare input and output names
std::vector<const char*> input_names_cstr;
std::vector<const char*> output_names_cstr;

for (const auto& name : _input_names) {
input_names_cstr.push_back(name.c_str());
}
for (const auto& name : _output_names) {
output_names_cstr.push_back(name.c_str());
}

// Run inference
std::vector<Ort::Value> output_tensors = _session->Run(Ort::RunOptions{nullptr}, input_names_cstr.data(), &input_tensor, 1,
output_names_cstr.data(), output_names_cstr.size());

if (output_tensors.empty()) {
LOG_WARNING << "Failed to get output tensors";
return {};
}

// Get output data
float* output_data = output_tensors[0].GetTensorMutableData<float>();
if (!output_data) {
LOG_WARNING << "Failed to get output data";
return {};
}

// Get the actual output size
auto output_tensor_info = output_tensors[0].GetTensorTypeAndShapeInfo();
std::vector<int64_t> output_shape = output_tensor_info.GetShape();

size_t output_size = 1;
for (int64_t dim : output_shape) {
output_size *= static_cast<size_t>(dim);
}

// Copy output data to vector
std::vector<float> output(output_data, output_data + output_size);
return output;

} catch (const Ort::Exception& e) {
LOG_WARNING << "ONNX exception during inference: " << e.what();
return {};
} catch (const std::exception& e) {
LOG_WARNING << "Exception during inference: " << e.what();
return {};
}
}

int ONNXModelHandler::getInputFeatureCount() const
{
return _input_feature_count;
}

int ONNXModelHandler::getOutputFeatureCount() const
{
return _output_feature_count;
}

} // namespace ipl

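A hedged usage sketch for the handler above, using only the methods shown in this diff (loadModel, getInputFeatureCount, predict); the model path and function name are placeholders, not files or APIs shipped with the repo.

#include <vector>

#include "onnx_model_handler.hh"

// Illustrative only; returns the raw model output, empty on any failure.
std::vector<float> run_once(ipl::ONNXModelHandler& handler)
{
  if (!handler.loadModel("/path/to/model.onnx")) {  // placeholder path
    return {};
  }
  // predict() fixes the batch size to 1, so pass a single feature vector
  // sized to the model's expected input width.
  std::vector<float> features(handler.getInputFeatureCount(), 0.0f);
  return handler.predict(features);
}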
src/operation/iPL/source/module/ai_predictor/wirelength/onnx_model_handler.hh → src/ai/predictor/ipl_wirelength/onnx_model_handler.hh


+148 -0 src/ai/predictor/ipl_wirelength/wirelength_predictor.cc

@@ -0,0 +1,148 @@
// ***************************************************************************************
// Copyright (c) 2023-2025 Peng Cheng Laboratory
// Copyright (c) 2023-2025 Institute of Computing Technology, Chinese Academy of Sciences
// Copyright (c) 2023-2025 Beijing Institute of Open Source Chip
//
// iEDA is licensed under Mulan PSL v2.
// You can use this software according to the terms and conditions of the Mulan PSL v2.
// You may obtain a copy of Mulan PSL v2 at:
// http://license.coscl.org.cn/MulanPSL2
//
// THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
// EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
// MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
//
// See the Mulan PSL v2 for more details.
// ***************************************************************************************

#include "wirelength_predictor.hh"

#include <iostream>

#include "Log.hh"

namespace ipl {

WirelengthPredictor::WirelengthPredictor()
: _model_handler(std::make_unique<ONNXModelHandler>()),
_via_normalizer(std::make_unique<NormalizationHandler>()),
_wirelength_normalizer(std::make_unique<NormalizationHandler>()),
_is_wirelength_model(false)
{
LOG_INFO << "Wirelength predictor initialized";
}

bool WirelengthPredictor::loadModel(const std::string& model_path)
{
if (!_model_handler->loadModel(model_path)) {
return false;
}

// Assume all models are wirelength models by default
_is_wirelength_model = true;

LOG_INFO << "Successfully loaded wirelength prediction model from " << model_path;
return true;
}

float WirelengthPredictor::predictWirelength(const std::vector<float>& features)
{
if (!isModelLoaded()) {
LOG_WARNING << "Model not loaded";
return -1.0f;
}

if (!_is_wirelength_model) {
LOG_WARNING << "Loaded model is not a wirelength prediction model";
return -1.0f;
}

std::vector<float> normalized_features = normalizeFeatures(features, true);

std::vector<float> output = _model_handler->predict(normalized_features);
if (output.empty()) {
LOG_WARNING << "Prediction failed";
return -1.0f;
}

float prediction = output[0];
LOG_INFO << "Net wirelength prediction: " << prediction;
return prediction;
}

float WirelengthPredictor::predictViaCount(int net_id, const std::vector<float>& features)
{
if (!isModelLoaded()) {
LOG_WARNING << "Model not loaded";
return -1.0f;
}

if (_is_wirelength_model) {
LOG_WARNING << "Loaded model is not a via count prediction model";
return -1.0f;
}

std::vector<float> normalized_features = normalizeFeatures(features, false);

std::vector<float> output = _model_handler->predict(normalized_features);
if (output.empty()) {
LOG_WARNING << "Prediction failed";
return -1.0f;
}

float prediction = output[0];
LOG_INFO << "Net " << net_id << " via count prediction: " << prediction;
return prediction;
}

bool WirelengthPredictor::loadViaNormalizationParams(const std::string& params_path)
{
if (!_via_normalizer->loadMinMaxParams(params_path)) {
LOG_WARNING << "Failed to load via normalization parameters: " << params_path;
return false;
}
LOG_INFO << "Successfully loaded via normalization parameters: " << params_path;
return true;
}

bool WirelengthPredictor::loadWirelengthNormalizationParams(const std::string& params_path)
{
if (!_wirelength_normalizer->loadMinMaxParams(params_path)) {
LOG_WARNING << "Failed to load wirelength normalization parameters: " << params_path;
return false;
}
LOG_INFO << "Successfully loaded wirelength normalization parameters: " << params_path;
return true;
}

int WirelengthPredictor::getRequiredFeatureCount() const
{
if (!isModelLoaded()) {
LOG_WARNING << "Model not loaded";
return 0;
}

return _model_handler->getInputFeatureCount();
}

bool WirelengthPredictor::isModelLoaded() const
{
// Check if model handler has loaded a model
return _model_handler->getInputFeatureCount() > 0;
}

std::vector<float> WirelengthPredictor::normalizeFeatures(const std::vector<float>& features, bool is_wirelength)
{
if (is_wirelength && _wirelength_normalizer && _wirelength_normalizer->isReady()) {
std::vector<float> normalized = _wirelength_normalizer->normalize(features);
return normalized;
} else if (!is_wirelength && _via_normalizer && _via_normalizer->isReady()) {
std::vector<float> normalized = _via_normalizer->normalize(features);
return normalized;
} else {
LOG_WARNING << "Warning: normalization parameters not loaded, using raw features";
return features;
}
}

} // namespace ipl

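Similarly, a sketch of the call sequence WirelengthPredictor implies above (load the ONNX model, optionally load normalization parameters, then predict); the paths and function name are placeholders.

#include <vector>

#include "wirelength_predictor.hh"

// Illustrative only; predictWirelength() returns -1.0f on failure.
float predict_once(ipl::WirelengthPredictor& predictor, const std::vector<float>& raw_features)
{
  if (!predictor.loadModel("model.onnx")) {  // placeholder path
    return -1.0f;
  }
  // Optional: without loaded parameters the predictor falls back to raw features.
  predictor.loadWirelengthNormalizationParams("params.json");  // placeholder path
  return predictor.predictWirelength(raw_features);
}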
src/operation/iPL/source/module/ai_predictor/wirelength/wirelength_predictor.hh → src/ai/predictor/ipl_wirelength/wirelength_predictor.hh


+6 -11 src/database/manager/builder/verilog_builder/verilog_read.cpp

@@ -439,7 +439,8 @@ int32_t RustVerilogRead::build_assign()
auto* the_left_io_pin = idb_io_pin_list->find_pin(left_net_name.c_str());
auto* the_right_io_pin = idb_io_pin_list->find_pin(right_net_name.c_str());

if (the_left_idb_net && the_right_idb_net && !the_left_io_pin && !the_right_io_pin) {
if ((the_left_idb_net && the_right_idb_net && !the_left_io_pin && !the_right_io_pin)
|| (the_left_idb_net && the_right_idb_net && the_left_io_pin && the_right_io_pin)) {
// assign net = net, need merge two net to one net.

// std::cout << "merge " << left_net_name << " = " << right_net_name << "\n";
@@ -473,18 +474,18 @@ int32_t RustVerilogRead::build_assign()
idb_net_list->remove_net(left_net_name);
remove_to_merge_nets[left_net_name] = the_right_idb_net;

} else if (the_left_idb_net && !the_left_io_pin) {
// assign net = input_port;
} else if (the_left_idb_net && !the_left_io_pin) {
if (the_right_io_pin && the_right_io_pin->is_io_pin()) {
// assign net = input_port;
the_left_idb_net->add_io_pin(the_right_io_pin);
the_right_io_pin->set_net(the_left_idb_net);
the_right_io_pin->set_net_name(the_left_idb_net->get_net_name());
} else {
std::cout << "assign " << left_net_name << " = " << right_net_name << " is not processed." << "\n";
}
} else if (the_right_idb_net && !the_right_io_pin) {
// assign output_port = net;
} else if (the_right_idb_net && !the_right_io_pin) {
if (the_left_io_pin->is_io_pin()) {
// assign output_port = net;
the_right_idb_net->add_io_pin(the_left_io_pin);
the_left_io_pin->set_net(the_right_idb_net);
the_left_io_pin->set_net_name(the_right_idb_net->get_net_name());
@@ -519,12 +520,6 @@ int32_t RustVerilogRead::build_assign()
} else {
std::cout << "assign " << left_net_name << " = " << right_net_name << " is not processed." << "\n";
}
} else if (the_left_idb_net && the_right_idb_net && the_left_io_pin && the_right_io_pin) {
// assign output_port = output_port
the_left_idb_net->add_io_pin(the_right_io_pin);
the_right_io_pin->set_net(the_left_idb_net);
the_right_io_pin->set_net_name(the_left_idb_net->get_net_name());

} else {
std::cout << "assign " << left_net_name << " = " << right_net_name << " is not processed." << "\n";
}


+45 -45 src/database/manager/builder/verilog_builder/verilog_write.cpp

@@ -122,21 +122,21 @@ void VerilogWriter::writePorts()
continue;
}

if (bus_processed.contains(pin_bus_name)) {
continue;
}
// if (bus_processed.contains(pin_bus_name)) {
// continue;
// }

if (!first) {
fprintf(_stream, ",\n");
}

bus_processed.insert(pin_bus_name);
// bus_processed.insert(pin_bus_name);

fprintf(_stream, "%s", pin_bus_name.c_str());
fprintf(_stream, "\\%s ", pin_name.c_str());
first = false;
}

fprintf(_stream, "\n);\n");
fprintf(_stream, ");\n");
}

/**
@@ -186,26 +186,26 @@ void VerilogWriter::writePortDcls()
continue;
}

if (bus_processed.contains(pin_bus_name)) {
continue;
}
// if (bus_processed.contains(pin_bus_name)) {
// continue;
// }

bus_processed.insert(pin_bus_name);
// bus_processed.insert(pin_bus_name);

auto pin_bus = _idb_design.get_bus_list()->findBus(pin_bus_name);
unsigned int bus_left = pin_bus->get().get_left();
unsigned int bus_right = pin_bus->get().get_right();
// auto pin_bus = _idb_design.get_bus_list()->findBus(pin_bus_name);
// unsigned int bus_left = pin_bus->get().get_left();
// unsigned int bus_right = pin_bus->get().get_right();

IdbConnectDirection port_dir = io_pin->get_term()->get_direction();

const char* bus_range = ieda::Str::printf("[%d:%d]", bus_left, bus_right);
// const char* bus_range = ieda::Str::printf("[%d:%d]", bus_left, bus_right);

if (port_dir == IdbConnectDirection::kInput) {
fprintf(_stream, "input %s %s ;\n", bus_range, pin_bus_name.c_str());
fprintf(_stream, "input \\%s ;\n", pin_name.c_str());
} else if (port_dir == IdbConnectDirection::kOutput) {
fprintf(_stream, "output %s %s ;\n", bus_range, pin_bus_name.c_str());
fprintf(_stream, "output \\%s ;\n", pin_name.c_str());
} else if (port_dir == IdbConnectDirection::kInOut) {
fprintf(_stream, "inout %s %s ;\n", bus_range, pin_bus_name.c_str());
fprintf(_stream, "inout \\%s ;\n", pin_name.c_str());
} else {
continue;
}
@@ -248,41 +248,41 @@ void VerilogWriter::writeWire()
fprintf(_stream, "wire %s ;\n", escape_net_name.c_str());
}

std::set<std::string> bus_processed;
for (const auto& net : net_list) {
std::string net_name = net->get_net_name();
// std::set<std::string> bus_processed;
// for (const auto& net : net_list) {
// std::string net_name = net->get_net_name();

auto [net_bus_name, is_bus] = ieda::Str::matchBusName(net_name.c_str());
// auto [net_bus_name, is_bus] = ieda::Str::matchBusName(net_name.c_str());

if (net_bus_name.back() == '\\') {
is_bus = std::nullopt;
}
// if (net_bus_name.back() == '\\') {
// is_bus = std::nullopt;
// }

// bus of bus is not printed as bus
if (std::ranges::count(net_name, '[') > 1) {
is_bus = std::nullopt;
}
// // bus of bus is not printed as bus
// if (std::ranges::count(net_name, '[') > 1) {
// is_bus = std::nullopt;
// }

if (!is_bus) {
continue;
}
// if (!is_bus) {
// continue;
// }

if (bus_processed.contains(net_bus_name)) {
continue;
}
// if (bus_processed.contains(net_bus_name)) {
// continue;
// }

bus_processed.insert(net_bus_name);
// remove all "\" in net_bus_name
net_bus_name.erase(std::remove(net_bus_name.begin(), net_bus_name.end(), '\\'), net_bus_name.end());
auto net_bus = _idb_design.get_bus_list()->findBus(net_bus_name);
assert(net_bus);
int bus_left = net_bus->get().get_left();
int bus_right = net_bus->get().get_right();
// bus_processed.insert(net_bus_name);
// // remove all "\" in net_bus_name
// net_bus_name.erase(std::remove(net_bus_name.begin(), net_bus_name.end(), '\\'), net_bus_name.end());
// auto net_bus = _idb_design.get_bus_list()->findBus(net_bus_name);
// assert(net_bus);
// int bus_left = net_bus->get().get_left();
// int bus_right = net_bus->get().get_right();

std::string escape_bus_net_name = escapeName(net_bus_name);
// std::string escape_bus_net_name = escapeName(net_bus_name);

fprintf(_stream, "wire [%d:%d] %s ;\n", bus_left, bus_right, escape_bus_net_name.c_str());
}
// fprintf(_stream, "wire [%d:%d] %s ;\n", bus_left, bus_right, escape_bus_net_name.c_str());
// }
}

/**
@@ -309,7 +309,7 @@ void VerilogWriter::writeAssign()
// assign output_port = net;
// assign output_port = input_port;
if (io_pin->get_term()->get_direction() == IdbConnectDirection::kOutput && io_pin->get_pin_name() != net_name) {
fprintf(_stream, "assign %s = %s ;\n", escape_net_name.c_str(), escape_io_pin_name.c_str());
fprintf(_stream, "assign %s = %s ;\n", escape_io_pin_name.c_str(), escape_net_name.c_str());
}
}
}


+1 -1 src/database/manager/parser/verilog/verilog-rust/verilog-parser/src/verilog_parser/mod.rs

@@ -958,8 +958,8 @@ pub fn flatten_module(verilog_file: &mut verilog_data::VerilogFile, top_module_n
verilog_file.set_top_module_name(top_module_name);
let module_map = verilog_file.get_module_map();
if module_map.len() > 1 {
println!("flatten module {} start", top_module_name);
let the_module = module_map.get(top_module_name).unwrap();
println!("flatten module {} start", top_module_name);
let mut have_sub_module;
loop {
have_sub_module = false;


+1 -1 src/interface/python/py_vec/py_register_vec.h

@@ -26,7 +26,7 @@ void register_vectorization(py::module& m)
{
m.def("layout_patchs", layout_patchs, py::arg("path"));
m.def("layout_graph", layout_graph, py::arg("path"));
m.def("generate_vectors", generate_vectors, py::arg("dir"), py::arg("patch_row_step") = 9, py::arg("patch_col_step") = 9);
m.def("generate_vectors", generate_vectors, py::arg("dir"), py::arg("patch_row_step") = 9, py::arg("patch_col_step") = 9, py::arg("batch_mode") = true);
m.def("read_vectors_nets", read_vectors_nets, py::arg("dir"));
m.def("read_vectors_nets_patterns", read_vectors_nets_patterns, py::arg("path"));



+2 -2 src/interface/python/py_vec/py_vec.cpp

@@ -35,13 +35,13 @@ bool layout_graph(const std::string& path)
return lm_api.buildVectorizationGraphData(path);
}

bool generate_vectors(std::string dir, int patch_row_step, int patch_col_step)
bool generate_vectors(std::string dir, int patch_row_step, int patch_col_step, bool batch_mode)
{
if (dir == "") {
dir = "./vectors";
}
ivec::VectorizationApi lm_api;
return lm_api.buildVectorizationFeature(dir, patch_row_step, patch_col_step);
return lm_api.buildVectorizationFeature(dir, patch_row_step, patch_col_step, batch_mode);
}

ieval::TimingWireGraph get_timing_wire_graph(std::string wire_graph_path)


+1 -1 src/interface/python/py_vec/py_vec.h

@@ -24,7 +24,7 @@ namespace python_interface {

bool layout_patchs(const std::string& path);
bool layout_graph(const std::string& path);
bool generate_vectors(std::string dir, int patch_row_step, int patch_col_step);
bool generate_vectors(std::string dir, int patch_row_step, int patch_col_step, bool batch_mode);
bool read_vectors_nets(std::string dir);
bool read_vectors_nets_patterns(std::string path);



+2 -2 src/operation/iDRC/interface/DRCInterface.cpp

@@ -678,7 +678,7 @@ std::vector<ids::Shape> DRCInterface::buildEnvShapeList()
}
}
}
// io pin
// io pin without net
for (idb::IdbPin* idb_io_pin : idb_io_pin_list) {
for (idb::IdbLayerShape* port_box : idb_io_pin->get_port_box_list()) {
total_env_shape_num += port_box->get_rect_list().size();
@@ -811,7 +811,7 @@ std::vector<ids::Shape> DRCInterface::buildEnvShapeList()
}
}
}
// io pin
// io pin without net
for (idb::IdbPin* idb_io_pin : idb_io_pin_list) {
int32_t net_idx = -1;
if (!isSkipping(idb_io_pin->get_net())) {


+4 -0 src/operation/iFP/source/module/tap_cell/tapcell.cpp

@@ -40,6 +40,10 @@ bool TapCellPlacer::tapCells(double distance, std::string tapcell_name, std::str
auto idb_layout = dmInst->get_idb_layout();
auto core_site = idb_layout->get_sites()->get_core_site();

if (core_site == nullptr) {
return false;
}

if (inst_space % core_site->get_width() == 0) {
return true;
} else {


+8 -0 src/operation/iPA/source/module/core/PwrArc.hh

@@ -68,9 +68,17 @@ class PwrInstArc : public PwrArc {
}
auto* get_power_arc_set() { return _power_arc_set; }

void set_internal_power(double internal_power) {
_internal_power = internal_power;
}
[[nodiscard]] double getInternalPower() const { return _internal_power.value_or(0.0); }
[[nodiscard]] auto get_internal_power() const { return _internal_power; }

private:
LibPowerArcSet* _power_arc_set =
nullptr; //!< The cell internal power set of different when condition.

std::optional<double> _internal_power; //!< The arc internal power.
};

/**


+8 -0 src/operation/iPA/source/module/core/PwrVertex.hh

@@ -121,6 +121,12 @@ class PwrVertex {
[[nodiscard]] unsigned is_clock_network() const { return _is_clock_network; }
void set_is_clock_network() { _is_clock_network = 1; }

void set_internal_power(double internal_power) {
_internal_power = internal_power;
}
[[nodiscard]] double getInternalPower() const { return _internal_power.value_or(0.0); }
[[nodiscard]] auto get_internal_power() const { return _internal_power; }

void addSrcArc(PwrArc* src_arc) { _src_arcs.emplace_back(src_arc); }
void addSnkArc(PwrArc* snk_arc) { _snk_arcs.emplace_back(snk_arc); }
auto& get_src_arcs() { return _src_arcs; }
@@ -196,6 +202,8 @@ class PwrVertex {
unsigned _is_clock_network : 1 = 0; //!< The vertex is clock network.
unsigned _reserved : 23 = 0; //!< reserved.

std::optional<double> _internal_power; //!< The pin internal power.

StaVertex* _sta_vertex; //!< The mapped sta vertex.
std::vector<PwrArc*> _src_arcs; //!< The power arc sourced from the vertex.
std::vector<PwrArc*> _snk_arcs; //!< The power arc sinked to the vertex.


+27 -13 src/operation/iPA/source/module/ops/calc_power/PwrCalcInternalPower.cc

@@ -23,6 +23,7 @@
*/

#include "PwrCalcInternalPower.hh"

#include "PwrCalcSPData.hh"
namespace ipower {
using ieda::Stats;
@@ -230,7 +231,7 @@ double PwrCalcInternalPower::calcOutputPinPower(Instance* inst,
if (!power_arc_set) {
continue;
}
LibPowerArc* power_arc;
FOREACH_POWER_LIB_ARC(power_arc_set, power_arc) {
auto [rise_power_mw, rise_input_slew_ns, rise_output_load] =
@@ -255,11 +256,14 @@ double PwrCalcInternalPower::calcOutputPinPower(Instance* inst,
if (!when.empty()) {
// get the sp data of this condition.
double sp_value = calcSPByWhen(when.c_str(), inst);
pin_internal_power += sp_value * the_arc_power;
the_arc_power = sp_value * the_arc_power;
pin_internal_power += the_arc_power;
} else {
pin_internal_power += the_arc_power;
}

dynamic_cast<PwrInstArc*>(snk_arc)->set_internal_power(the_arc_power);

// for debug
if (0) {
std::ofstream out_debug("internal_out.txt");
@@ -461,11 +465,18 @@ double PwrCalcInternalPower::calcCombInternalPower(Instance* inst) {
}

FOREACH_INSTANCE_PIN(inst, pin) {
auto* the_pwr_graph = get_the_pwr_graph();
auto* the_sta_graph = the_pwr_graph->get_sta_graph();
auto the_sta_vertex = the_sta_graph->findVertex(pin);
auto* the_pwr_vertex = the_pwr_graph->staToPwrVertex(*the_sta_vertex);

// for inout pin, we need calc input and output both.
if (pin->isInput()) {
/*calc input port power*/
double pin_internal_power =
calcCombInputPinPower(inst, pin, input_sum_toggle, output_pin_toggle);
the_pwr_vertex->set_internal_power(pin_internal_power);
inst_internal_power += pin_internal_power;
}

@@ -511,22 +522,25 @@ double PwrCalcInternalPower::calcSeqInternalPower(Instance* inst) {
}

if (pin->isInput()) {
double pin_internal_power = 0.0;
/*calc input port power*/
double pin_internal_power = calcSeqInputPinPower(inst, pin);
inst_internal_power += pin_internal_power;
if ((*the_sta_vertex)->is_clock()) {
/*calc clk power*/
pin_internal_power =
calcClockPinPower(inst, pin, output_pin_toggle);
inst_internal_power += pin_internal_power;
} else {
pin_internal_power = calcSeqInputPinPower(inst, pin);
inst_internal_power += pin_internal_power;
}

the_pwr_vertex->set_internal_power(pin_internal_power);

} else if (pin->isOutput()) {
} else {
/*calc output port power*/
double pin_internal_power = calcOutputPinPower(inst, pin);
inst_internal_power += pin_internal_power;
} else if ((*the_sta_vertex)->is_clock()) {
/*calc clk power*/
double pin_internal_power =
calcClockPinPower(inst, pin, output_pin_toggle);
inst_internal_power += pin_internal_power;
} else {
/*calc cdn power*/
}
}
}

return inst_internal_power;

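A condensed sketch (not code from the diff) of the branch order the hunk above establishes for sequential-cell pins; function names follow the diff, bodies are elided.

// Illustrative control flow only, inferred from the hunk above:
//
// FOREACH_INSTANCE_PIN(inst, pin) {
//   if (pin->isInput()) {
//     double p = (*the_sta_vertex)->is_clock()
//                    ? calcClockPinPower(inst, pin, output_pin_toggle)  // clock pin
//                    : calcSeqInputPinPower(inst, pin);                 // data pin
//     the_pwr_vertex->set_internal_power(p);
//     inst_internal_power += p;
//   } else {
//     inst_internal_power += calcOutputPinPower(inst, pin);             // output pin
//   }
// }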

+183 -0 src/operation/iPA/source/module/ops/dump/PwrDumpGraph.cc

@@ -32,6 +32,9 @@

#include "core/PwrSeqGraph.hh"
#include "string/Str.hh"
#include "sta/Sta.hh"
#include "api/Power.hh"
#include "sta/StaDump.hh"

namespace ipower {

@@ -279,4 +282,184 @@ void PwrDumpGraphViz::printText(const char* file_name) {
LOG_INFO << "dump graph dotviz end";
}

/**
* @brief dump the power node feature.
*
* @param the_graph
* @return PwrDumpGraphJson::json
*/
PwrDumpGraphJson::json PwrDumpGraphJson::dumpNodeFeature(PwrGraph* the_graph) {
json all_vertex_node_feature_array = json::array();

auto* the_sta_graph = the_graph->get_sta_graph();
auto* nl = the_sta_graph->get_nl();
auto [die_width, die_height] = nl->get_die_size().value();

auto& pwr_vertexes = the_graph->get_vertexes();
for (auto& pwr_vertex : pwr_vertexes) {
auto* the_sta_vertex = pwr_vertex->get_sta_vertex();
json one_node_feature_array = json::array();

auto* the_obj = the_sta_vertex->get_design_obj();
the_obj->isPort() ? one_node_feature_array.push_back(1.0) // is_port
: one_node_feature_array.push_back(0.0);
the_obj->isInput() ? one_node_feature_array.push_back(0.0) // is_input
: one_node_feature_array.push_back(1.0);
// the distance to 4 die boundary, left, right, top, bottom TBD.
if (the_obj->get_coordinate()) {
auto [pin_x, pin_y] = the_obj->get_coordinate().value();
double left_bottom_distance = pin_x + pin_y;
double right_bottom_distance = die_width - pin_x + pin_y;
double left_top_distance = pin_x + die_height - pin_y;
double right_top_distance = die_width - pin_x + die_height - pin_y;

// the order is lb(left bottom), rt, rb, lt
one_node_feature_array.push_back(left_bottom_distance);
one_node_feature_array.push_back(right_top_distance);
one_node_feature_array.push_back(right_bottom_distance);
one_node_feature_array.push_back(left_top_distance);

} else {
// assume the non-pin node is in the left bottom of the die.
one_node_feature_array.push_back(0.0);
one_node_feature_array.push_back(die_width + die_height);
one_node_feature_array.push_back(die_width);
one_node_feature_array.push_back(die_height);
}

// TODO(to taosimin), min or max first? assume min first
double max_rise_cap =
the_sta_vertex->getLoad(AnalysisMode::kMax, TransType::kRise);
double max_fall_cap =
the_sta_vertex->getLoad(AnalysisMode::kMax, TransType::kFall);
double min_rise_cap =
the_sta_vertex->getLoad(AnalysisMode::kMin, TransType::kRise);
double min_fall_cap =
the_sta_vertex->getLoad(AnalysisMode::kMin, TransType::kFall);

one_node_feature_array.push_back(min_rise_cap);
one_node_feature_array.push_back(min_fall_cap);

one_node_feature_array.push_back(max_rise_cap);
one_node_feature_array.push_back(max_fall_cap);

double toggle_data = pwr_vertex->getToggleData(std::nullopt);
double sp_data = pwr_vertex->getSPData(std::nullopt);

one_node_feature_array.push_back(toggle_data);
one_node_feature_array.push_back(sp_data);

all_vertex_node_feature_array.push_back(one_node_feature_array);

}

return all_vertex_node_feature_array;
}

/**
* @brief for net driver node, dump the net power.
*
* @param the_graph
* @return PwrDumpGraphJson::json
*/
PwrDumpGraphJson::json PwrDumpGraphJson::dumpNodeNetPower(PwrGraph* the_graph) {
json all_vertex_node_net_power_array = json::array();

auto* ista = ista::Sta::getOrCreateSta();
Power* ipower = Power::getOrCreatePower(&(ista->get_graph()));

// build switch power map.
auto& switch_powers = ipower->get_switch_powers();
std::map<PwrVertex*, double> vertex_to_switch_power;
for (auto& switch_power : switch_powers) {
auto* the_net = switch_power->get_design_obj();
auto* the_driver = dynamic_cast<ista::Net*>(the_net)->getDriver();
auto* the_sta_vertex = ista->findVertex(the_driver);
auto* the_pwr_vertex = the_graph->staToPwrVertex(the_sta_vertex);
vertex_to_switch_power[the_pwr_vertex] = switch_power->get_switch_power();
}

// get switch power for the vertex.
auto& pwr_vertexes = the_graph->get_vertexes();
for (auto& pwr_vertex : pwr_vertexes) {
double switch_power = 0.0;
if (vertex_to_switch_power.contains(pwr_vertex.get())) {
switch_power = vertex_to_switch_power[pwr_vertex.get()];
}

all_vertex_node_net_power_array.push_back(switch_power);
}


return all_vertex_node_net_power_array;
}

/**
* @brief for input pin node, dump the pin internal power.
*
* @param the_graph
* @return PwrDumpGraphJson::json
*/
PwrDumpGraphJson::json PwrDumpGraphJson::dumpNodeInternalPower(PwrGraph* the_graph) {
json all_vertex_node_internal_power_array = json::array();
auto& pwr_vertexes = the_graph->get_vertexes();
for (auto& pwr_vertex : pwr_vertexes) {
double internal_power = pwr_vertex->getInternalPower();
all_vertex_node_internal_power_array.push_back(internal_power);
}

return all_vertex_node_internal_power_array;
}

/**
* @brief for cell instance power arc, dump the arc power.
*
* @param the_graph
* @return PwrDumpGraphJson::json
*/
PwrDumpGraphJson::json PwrDumpGraphJson::dumpInstInternalPower(PwrGraph* the_graph) {
json all_inst_arc_delay_array = json::array();
auto& pwr_arcs = the_graph->get_arcs();
for (auto& the_pwr_arc : pwr_arcs) {
if (the_pwr_arc->isInstArc()) {
double internal_power = dynamic_cast<PwrInstArc*>(the_pwr_arc.get())->getInternalPower();
all_inst_arc_delay_array.push_back(internal_power);
}
}

return all_inst_arc_delay_array;
}

/**
* @brief dump the power graph json for power predict.
*
* @param the_graph
* @return unsigned
*/
unsigned PwrDumpGraphJson::operator()(PwrGraph* the_graph) {
LOG_INFO << "dump graph json start";
auto* the_sta_graph = the_graph->get_sta_graph();
ista::StaDumpGraphJson dump_graph_json(_json_file);

unsigned num_nodes = the_graph->numVertex();
_json_file["num_nodes"] = num_nodes;

_json_file["edges"] = dump_graph_json.dumpEdges(the_sta_graph);
// dump node features
auto n_node_features = dumpNodeFeature(the_graph);
auto n_net_power = dumpNodeNetPower(the_graph);

_json_file["node_features"]["nf"] = n_node_features;
_json_file["node_features"]["n_net_powers"] = n_node_features;

// dump arc features
auto e_inst_arc_internal_power = dumpInstInternalPower(the_graph);
_json_file["edge_features"]["e_inst_arc_internal_power"] = e_inst_arc_internal_power;


LOG_INFO << "dump graph json end";
return 1;
}

} // namespace ipower

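For readers consuming the JSON emitted above, a comment sketch of the per-vertex "nf" feature order; this layout is inferred from dumpNodeFeature in this diff, not a documented schema.

// Inferred per-vertex "nf" layout (12 floats per node):
//   [0]    is_port flag (1.0 / 0.0)
//   [1]    input-direction flag pushed from isInput()
//   [2..5] distances to die corners, in order lb, rt, rb, lt
//   [6..7] min-mode rise/fall load capacitance
//   [8..9] max-mode rise/fall load capacitance
//   [10]   toggle data, [11] SP data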
+31 -7 src/operation/iPA/source/module/ops/dump/PwrDumpGraph.hh

@@ -1,16 +1,16 @@
// ***************************************************************************************
// Copyright (c) 2023-2025 Peng Cheng Laboratory
// Copyright (c) 2023-2025 Institute of Computing Technology, Chinese Academy of Sciences
// Copyright (c) 2023-2025 Beijing Institute of Open Source Chip
// Copyright (c) 2023-2025 Institute of Computing Technology, Chinese Academy of
// Sciences Copyright (c) 2023-2025 Beijing Institute of Open Source Chip
//
// iEDA is licensed under Mulan PSL v2.
// You can use this software according to the terms and conditions of the Mulan PSL v2.
// You may obtain a copy of Mulan PSL v2 at:
// You can use this software according to the terms and conditions of the Mulan
// PSL v2. You may obtain a copy of Mulan PSL v2 at:
// http://license.coscl.org.cn/MulanPSL2
//
// THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
// EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
// MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
// THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY
// KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
// NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
//
// See the Mulan PSL v2 for more details.
// ***************************************************************************************
@@ -25,6 +25,9 @@
#pragma once

#include <yaml-cpp/yaml.h>

#include "json/json.hpp"

#include <sstream>

#include "core/PwrFunc.hh"
@@ -62,4 +65,25 @@ class PwrDumpGraphViz : public PwrFunc {
std::stringstream _ss; //!< for print information to string stream.
};

/**
* @brief dump the power graph json for power predict.
*
*/
class PwrDumpGraphJson : public PwrFunc {
public:
using json = nlohmann::ordered_json;
PwrDumpGraphJson(json& json_file) : _json_file(json_file) {}
~PwrDumpGraphJson() override = default;

unsigned operator()(PwrGraph* the_graph) override;

json dumpNodeFeature(PwrGraph* the_graph);
json dumpNodeNetPower(PwrGraph* the_graph);
json dumpNodeInternalPower(PwrGraph* the_graph);
json dumpInstInternalPower(PwrGraph* the_graph);

private:
json& _json_file;
};

} // namespace ipower

+9 -15 src/operation/iPL/api/PLAPI.cc

@@ -92,7 +92,7 @@ void PLAPI::initAPI(std::string pl_json_path, idb::IdbBuilder* idb_builder)
createPLDirectory();

char config[] = "info_ipl_glog";
char* argv[] = { config };
char* argv[] = {config};

std::string log_home_path = this->obtainTargetDir() + "/pl/log/";
// std::string design_name = idb_builder->get_def_service()->get_design()->get_design_name();
@@ -481,8 +481,8 @@ void PLAPI::runAiFlow(const std::string& onnx_path, const std::string& normaliza
if (isSTAStarted()) {
runPostGP();
} else {
#ifdef BUILD_AI_PREDICTOR
runAIDP(onnx_path, normalization_path);
#ifdef ENABLE_AI
runDPwithAiWireLengthPredictor(onnx_path, normalization_path);
#else
runDP();
#endif
@@ -495,7 +495,6 @@ void PLAPI::runAiFlow(const std::string& onnx_path, const std::string& normaliza
std::cout << std::endl;
LOG_INFO << "Log has been writed to dir: ./result/pl/log/";


if (isSTAStarted()) {
_external_api->destroyTimingEval();
}
@@ -584,8 +583,8 @@ void PLAPI::runDP()
}
}

#ifdef BUILD_AI_PREDICTOR
void PLAPI::runAIDP(const std::string& onnx_path, const std::string& normalization_path)
#ifdef ENABLE_AI
void PLAPI::runDPwithAiWireLengthPredictor(const std::string& onnx_path, const std::string& normalization_path)
{
bool legal_flag = checkLegality();
if (!legal_flag) {
@@ -595,22 +594,17 @@ void PLAPI::runAIDP(const std::string& onnx_path, const std::string& normalizati

DetailPlacer detail_place(PlacerDBInst.get_placer_config(), &PlacerDBInst);

if (!detail_place.loadAIWirelengthModel(onnx_path)) {
if (!detail_place.init_ai_wirelength_model(onnx_path, normalization_path)) {
LOG_ERROR << "Failed to load AI wirelength model: " << onnx_path;
LOG_INFO << "Falling back to traditional HPWL";
} else {
detail_place.setUseAIWirelength(true);
}

if(!detail_place.loadAIWirelengthNormalizationParams(normalization_path)){
LOG_ERROR << "Failed to load AI wirelength normalization parameters: " << normalization_path;
LOG_ERROR << "Falling back to traditional HPWL";
return;
}

detail_place.runDetailPlace();

if (!checkLegality()) {
LOG_WARNING << "DP result is not legal";
}
}
}
#endif



+2 -2 src/operation/iPL/api/PLAPI.hh

@@ -58,8 +58,8 @@ class PLAPI
bool runIncrLG(std::vector<std::string> inst_name_list);
void runPostGP();
void runDP();
#ifdef BUILD_AI_PREDICTOR
void runAIDP(const std::string& onnx_path, const std::string& normalization_path);
#ifdef ENABLE_AI
void runDPwithAiWireLengthPredictor(const std::string& onnx_path, const std::string& normalization_path);
#endif
void runBufferInsertion();
void writeBackSourceDataBase();


+0 -8 src/operation/iPL/source/module/CMakeLists.txt

@@ -12,7 +12,6 @@ set(iPL_GRID_MANAGER ${iPL_MODULE}/grid_manager)
set(iPL_LOGGER ${iPL_MODULE}/logger)
set(iPL_TOPOLOGY_MANAGER ${iPL_MODULE}/topology_manager)
set(iPL_WRAPPER ${iPL_MODULE}/wrapper)
set(iPL_AI_PREDICTOR ${iPL_MODULE}/ai_predictor)

# add_subdirectory(${iPL_MP})
add_subdirectory(${iPL_IP})
@@ -28,9 +27,6 @@ add_subdirectory(${iPL_GRID_MANAGER})
add_subdirectory(${iPL_LOGGER})
add_subdirectory(${iPL_TOPOLOGY_MANAGER})
add_subdirectory(${iPL_WRAPPER})
if(BUILD_AI_PREDICTOR)
add_subdirectory(${iPL_AI_PREDICTOR})
endif()

add_library(ipl-module INTERFACE)
set(IPL_MODULE_LIBS
@@ -50,10 +46,6 @@ set(IPL_MODULE_LIBS
ipl-module-wrapper
)

if(BUILD_AI_PREDICTOR)
list(APPEND IPL_MODULE_LIBS ipl-module-ai_predictor)
endif()

target_link_libraries(ipl-module
INTERFACE
${IPL_MODULE_LIBS}


+0 -13 src/operation/iPL/source/module/ai_predictor/CMakeLists.txt

@@ -1,13 +0,0 @@
#set
set(iPL_WIRELENGTH_PREDICT ${iPL_AI_PREDICTOR}/wirelength)

add_subdirectory(${iPL_WIRELENGTH_PREDICT})

add_library(ipl-module-ai_predictor INTERFACE)

target_link_libraries(ipl-module-ai_predictor
INTERFACE
ipl_module_ai_wirelength
)

target_include_directories(ipl-module-ai_predictor INTERFACE ${iPL_AI_PREDICTOR})

+ 0
- 143
src/operation/iPL/source/module/ai_predictor/wirelength/normalization_handler.cc View File

@@ -1,143 +0,0 @@
// ***************************************************************************************
// Copyright (c) 2023-2025 Peng Cheng Laboratory
// Copyright (c) 2023-2025 Institute of Computing Technology, Chinese Academy of Sciences
// Copyright (c) 2023-2025 Beijing Institute of Open Source Chip
//
// iEDA is licensed under Mulan PSL v2.
// You can use this software according to the terms and conditions of the Mulan PSL v2.
// You may obtain a copy of Mulan PSL v2 at:
// http://license.coscl.org.cn/MulanPSL2
//
// THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
// EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
// MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
//
// See the Mulan PSL v2 for more details.
// ***************************************************************************************

#include "normalization_handler.hh"

#include <fstream>
#include <iostream>
#include <sstream>
#include <algorithm>
#include <cmath>

#include "json.hpp"

namespace ipl {

NormalizationHandler::NormalizationHandler() : _is_loaded(false) {}

NormalizationHandler::~NormalizationHandler() {}

bool NormalizationHandler::loadMinMaxParams(const std::string& params_path) {
return _parseJsonParams(params_path);
}

void NormalizationHandler::setMinMaxParams(const std::vector<float>& data_min,
const std::vector<float>& data_max,
const std::vector<std::string>& feature_names) {
if (data_min.size() != data_max.size()) {
std::cerr << "Error: data_min and data_max must have same size" << std::endl;
return;
}

_data_min = data_min;
_data_max = data_max;
_feature_names = feature_names;
_is_loaded = true;
}

std::vector<float> NormalizationHandler::normalize(const std::vector<float>& features) const {
if (!_is_loaded) {
std::cerr << "Error: Normalization parameters not loaded" << std::endl;
return features;
}

if (features.size() != _data_min.size()) {
std::cerr << "Error: Feature size mismatch. Expected " << _data_min.size()
<< ", got " << features.size() << std::endl;
return features;
}

std::vector<float> normalized_features;
normalized_features.reserve(features.size());

for (size_t i = 0; i < features.size(); ++i) {
float range = _data_max[i] - _data_min[i];
if (range == 0.0f) {
// if max == min, normalized = 0
normalized_features.push_back(0.0f);
} else {
// MinMax normalization: (x - min) / (max - min)
float normalized = (features[i] - _data_min[i]) / range;

normalized = std::max(0.0f, std::min(1.0f, normalized));
normalized_features.push_back(normalized);
}
}

return normalized_features;
}

bool NormalizationHandler::isReady() const {
return _is_loaded;
}

std::vector<std::string> NormalizationHandler::getFeatureNames() const {
return _feature_names;
}

size_t NormalizationHandler::getFeatureCount() const {
return _data_min.size();
}

bool NormalizationHandler::_parseJsonParams(const std::string& params_path) {
std::ifstream file(params_path);
if (!file.is_open()) {
std::cerr << "Error: Cannot open normalization parameters file: " << params_path << std::endl;
return false;
}

try {
nlohmann::json j;
file >> j;

// parse data
if (j.contains("data_min") && j.contains("data_max")) {
_data_min = j["data_min"].get<std::vector<float>>();
_data_max = j["data_max"].get<std::vector<float>>();
if (j.contains("feature_names")) {
_feature_names = j["feature_names"].get<std::vector<std::string>>();
}

if (_data_min.size() != _data_max.size() || _data_min.empty()) {
std::cerr << "Error: Invalid normalization parameters - size mismatch" << std::endl;
return false;
}

_is_loaded = true;

std::cout << "Successfully loaded normalization parameters:" << std::endl;
std::cout << " Features: " << _data_min.size() << std::endl;
std::cout << " Feature names: ";
for (const auto& name : _feature_names) {
std::cout << name << " ";
}
std::cout << std::endl;

return true;
} else {
std::cerr << "Error: Missing required fields in JSON" << std::endl;
return false;
}

} catch (const std::exception& e) {
std::cerr << "Error parsing JSON: " << e.what() << std::endl;
return false;
}
}

} // namespace ipl
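
This handler is deleted in the change (its role moves into the new ai_wirelength module), but the min-max scheme it computed is easy to see in isolation; a self-contained sketch, with made-up two-feature parameters:

#include <algorithm>
#include <vector>

// Min-max scaling as the deleted handler computed it: (x - min) / (max - min),
// clamped to [0, 1]; a zero range maps to 0. All parameter values are made up.
std::vector<float> minmax_scale(const std::vector<float>& x,
                                const std::vector<float>& data_min,
                                const std::vector<float>& data_max) {
  std::vector<float> out(x.size());
  for (size_t i = 0; i < x.size(); ++i) {
    float range = data_max[i] - data_min[i];
    out[i] = (range == 0.0f) ? 0.0f
                             : std::clamp((x[i] - data_min[i]) / range, 0.0f, 1.0f);
  }
  return out;
}
// minmax_scale({5.f, 50.f}, {0.f, 0.f}, {10.f, 100.f}) yields {0.5f, 0.5f}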

+ 0
- 210
src/operation/iPL/source/module/ai_predictor/wirelength/onnx_model_handler.cc View File

@@ -1,210 +0,0 @@
// ***************************************************************************************
// Copyright (c) 2023-2025 Peng Cheng Laboratory
// Copyright (c) 2023-2025 Institute of Computing Technology, Chinese Academy of Sciences
// Copyright (c) 2023-2025 Beijing Institute of Open Source Chip
//
// iEDA is licensed under Mulan PSL v2.
// You can use this software according to the terms and conditions of the Mulan PSL v2.
// You may obtain a copy of Mulan PSL v2 at:
// http://license.coscl.org.cn/MulanPSL2
//
// THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
// EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
// MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
//
// See the Mulan PSL v2 for more details.
// ***************************************************************************************
#include "onnx_model_handler.hh"

#include <iostream>

namespace ipl {

ONNXModelHandler::ONNXModelHandler() : _env(ORT_LOGGING_LEVEL_WARNING, "ONNXModelHandler") {
// Initialize ONNX Runtime environment
_session_options.SetIntraOpNumThreads(1);
_session_options.SetInterOpNumThreads(1);
_session_options.SetGraphOptimizationLevel(GraphOptimizationLevel::ORT_ENABLE_BASIC);
}

ONNXModelHandler::~ONNXModelHandler() {
// Clean up - smart pointers handle this automatically
}

bool ONNXModelHandler::loadModel(const std::string& model_path) {
try {
// Create session from model file
_session = std::make_unique<Ort::Session>(_env, model_path.c_str(), _session_options);

// Get allocator
Ort::AllocatorWithDefaultOptions allocator;

// Get input information
size_t num_input_nodes = _session->GetInputCount();
if (num_input_nodes == 0) {
std::cerr << "Model has no input nodes" << std::endl;
return false;
}

// Get input names and shapes
_input_names.clear();
_input_shapes.clear();
for (size_t i = 0; i < num_input_nodes; i++) {
// Get input name using the correct API
Ort::AllocatedStringPtr input_name_ptr = _session->GetInputNameAllocated(i, allocator);
_input_names.push_back(std::string(input_name_ptr.get()));

// Get input type info
Ort::TypeInfo input_type_info = _session->GetInputTypeInfo(i);
auto input_tensor_info = input_type_info.GetTensorTypeAndShapeInfo();
// Get input shape
std::vector<int64_t> input_shape = input_tensor_info.GetShape();
_input_shapes.push_back(input_shape);
}

// Get output information
size_t num_output_nodes = _session->GetOutputCount();
if (num_output_nodes == 0) {
std::cerr << "Model has no output nodes" << std::endl;
return false;
}

// Get output names and shapes
_output_names.clear();
_output_shapes.clear();
for (size_t i = 0; i < num_output_nodes; i++) {
// Get output name
Ort::AllocatedStringPtr output_name_ptr = _session->GetOutputNameAllocated(i, allocator);
_output_names.push_back(std::string(output_name_ptr.get()));

// Get output type info
Ort::TypeInfo output_type_info = _session->GetOutputTypeInfo(i);
auto output_tensor_info = output_type_info.GetTensorTypeAndShapeInfo();
// Get output shape
std::vector<int64_t> output_shape = output_tensor_info.GetShape();
_output_shapes.push_back(output_shape);
}

// Validate shapes for our use case
if (_input_shapes[0].size() != 2) {
std::cerr << "Unexpected input shape dimension: " << _input_shapes[0].size() << std::endl;
return false;
}

if (_output_shapes[0].size() != 2) {
std::cerr << "Unexpected output shape dimension: " << _output_shapes[0].size() << std::endl;
return false;
}

// Set feature counts (assuming batch dimension is dynamic or 1)
_input_feature_count = static_cast<int>(_input_shapes[0][1]);
_output_feature_count = static_cast<int>(_output_shapes[0][1]);

std::cout << "Successfully loaded ONNX model from " << model_path << std::endl;
std::cout << "Input name: " << _input_names[0] << std::endl;
std::cout << "Output name: " << _output_names[0] << std::endl;
std::cout << "Input feature count: " << _input_feature_count << std::endl;
std::cout << "Output feature count: " << _output_feature_count << std::endl;

return true;
} catch (const Ort::Exception& e) {
std::cerr << "ONNX exception: " << e.what() << std::endl;
return false;
} catch (const std::exception& e) {
std::cerr << "Exception: " << e.what() << std::endl;
return false;
}
}

std::vector<float> ONNXModelHandler::predict(const std::vector<float>& input) {
if (!_session) {
std::cerr << "Model not loaded" << std::endl;
return {};
}

if (input.size() != static_cast<size_t>(_input_feature_count)) {
std::cerr << "Input feature count mismatch: expected " << _input_feature_count
<< ", got " << input.size() << std::endl;
return {};
}

try {
// Create input tensor
const std::vector<int64_t> input_shape = {1, _input_feature_count}; // Batch size 1
Ort::MemoryInfo memory_info = Ort::MemoryInfo::CreateCpu(
OrtAllocatorType::OrtArenaAllocator, OrtMemType::OrtMemTypeDefault);

Ort::Value input_tensor = Ort::Value::CreateTensor<float>(
memory_info, const_cast<float*>(input.data()), input.size(),
input_shape.data(), input_shape.size());

if (!input_tensor.IsTensor()) {
std::cerr << "Failed to create input tensor" << std::endl;
return {};
}

// Prepare input and output names
std::vector<const char*> input_names_cstr;
std::vector<const char*> output_names_cstr;
for (const auto& name : _input_names) {
input_names_cstr.push_back(name.c_str());
}
for (const auto& name : _output_names) {
output_names_cstr.push_back(name.c_str());
}

// Run inference
std::vector<Ort::Value> output_tensors = _session->Run(
Ort::RunOptions{nullptr},
input_names_cstr.data(), &input_tensor, 1,
output_names_cstr.data(), output_names_cstr.size());

if (output_tensors.empty()) {
std::cerr << "Failed to get output tensors" << std::endl;
return {};
}

// Get output data
float* output_data = output_tensors[0].GetTensorMutableData<float>();
if (!output_data) {
std::cerr << "Failed to get output data" << std::endl;
return {};
}

// Get the actual output size
auto output_tensor_info = output_tensors[0].GetTensorTypeAndShapeInfo();
std::vector<int64_t> output_shape = output_tensor_info.GetShape();
size_t output_size = 1;
for (int64_t dim : output_shape) {
output_size *= static_cast<size_t>(dim);
}

// Copy output data to vector
std::vector<float> output(output_data, output_data + output_size);
return output;
} catch (const Ort::Exception& e) {
std::cerr << "ONNX exception during inference: " << e.what() << std::endl;
return {};
} catch (const std::exception& e) {
std::cerr << "Exception during inference: " << e.what() << std::endl;
return {};
}
}

int ONNXModelHandler::getInputFeatureCount() const {
return _input_feature_count;
}

int ONNXModelHandler::getOutputFeatureCount() const {
return _output_feature_count;
}

} // namespace ipl
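
As a reminder of what is being removed, the handler's public surface was exercised like this (the model path is hypothetical):

ONNXModelHandler handler;
if (handler.loadModel("./model/wirelength.onnx")) {  // hypothetical path
  // feed one feature vector; the handler runs a batch-size-1 inference
  std::vector<float> features(handler.getInputFeatureCount(), 0.0f);
  std::vector<float> output = handler.predict(features);
}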

+ 0
- 146
src/operation/iPL/source/module/ai_predictor/wirelength/wirelength_predictor.cc View File

@@ -1,146 +0,0 @@
// ***************************************************************************************
// Copyright (c) 2023-2025 Peng Cheng Laboratory
// Copyright (c) 2023-2025 Institute of Computing Technology, Chinese Academy of Sciences
// Copyright (c) 2023-2025 Beijing Institute of Open Source Chip
//
// iEDA is licensed under Mulan PSL v2.
// You can use this software according to the terms and conditions of the Mulan PSL v2.
// You may obtain a copy of Mulan PSL v2 at:
// http://license.coscl.org.cn/MulanPSL2
//
// THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
// EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
// MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
//
// See the Mulan PSL v2 for more details.
// ***************************************************************************************

#include "wirelength_predictor.hh"

#include <iostream>

namespace ipl {

WirelengthPredictor::WirelengthPredictor() :
_model_handler(std::make_unique<ONNXModelHandler>()),
_via_normalizer(std::make_unique<NormalizationHandler>()),
_wirelength_normalizer(std::make_unique<NormalizationHandler>()),
_is_wirelength_model(false)
{
std::cout << "Wirelength predictor initialized" << std::endl;
}

bool WirelengthPredictor::loadModel(const std::string& model_path)
{
if (!_model_handler->loadModel(model_path)) {
return false;
}

// Assume all models are wirelength models by default
_is_wirelength_model = true;

std::cout << "Successfully loaded wirelength prediction model from " << model_path << std::endl;
return true;
}

float WirelengthPredictor::predictWirelength(const std::vector<float>& features)
{
if (!isModelLoaded()) {
std::cerr << "Model not loaded" << std::endl;
return -1.0f;
}

if (!_is_wirelength_model) {
std::cerr << "Loaded model is not a wirelength prediction model" << std::endl;
return -1.0f;
}

std::vector<float> normalized_features = normalizeFeatures(features, true);

std::vector<float> output = _model_handler->predict(normalized_features);
if (output.empty()) {
std::cerr << "Prediction failed" << std::endl;
return -1.0f;
}

float prediction = output[0];
std::cout << "Net wirelength prediction: " << prediction << std::endl;
return prediction;
}

float WirelengthPredictor::predictViaCount(int net_id, const std::vector<float>& features)
{
if (!isModelLoaded()) {
std::cerr << "Model not loaded" << std::endl;
return -1.0f;
}

if (_is_wirelength_model) {
std::cerr << "Loaded model is not a via count prediction model" << std::endl;
return -1.0f;
}

std::vector<float> normalized_features = normalizeFeatures(features, false);

std::vector<float> output = _model_handler->predict(normalized_features);
if (output.empty()) {
std::cerr << "Prediction failed" << std::endl;
return -1.0f;
}

float prediction = output[0];
std::cout << "Net " << net_id << " via count prediction: " << prediction << std::endl;
return prediction;
}

bool WirelengthPredictor::loadViaNormalizationParams(const std::string& params_path)
{
if (!_via_normalizer->loadMinMaxParams(params_path)) {
std::cerr << "Failed to load via normalization parameters: " << params_path << std::endl;
return false;
}
std::cout << "Successfully loaded via normalization parameters: " << params_path << std::endl;
return true;
}

bool WirelengthPredictor::loadWirelengthNormalizationParams(const std::string& params_path)
{
if (!_wirelength_normalizer->loadMinMaxParams(params_path)) {
std::cerr << "Failed to load wirelength normalization parameters: " << params_path << std::endl;
return false;
}
std::cout << "Successfully loaded wirelength normalization parameters: " << params_path << std::endl;
return true;
}

int WirelengthPredictor::getRequiredFeatureCount() const
{
if (!isModelLoaded()) {
std::cerr << "Model not loaded" << std::endl;
return 0;
}

return _model_handler->getInputFeatureCount();
}

bool WirelengthPredictor::isModelLoaded() const
{
// Check if model handler has loaded a model
return _model_handler->getInputFeatureCount() > 0;
}

std::vector<float> WirelengthPredictor::normalizeFeatures(const std::vector<float>& features, bool is_wirelength)
{
if (is_wirelength && _wirelength_normalizer && _wirelength_normalizer->isReady()) {
std::vector<float> normalized = _wirelength_normalizer->normalize(features);
return normalized;
} else if (!is_wirelength && _via_normalizer && _via_normalizer->isReady()) {
std::vector<float> normalized = _via_normalizer->normalize(features);
return normalized;
} else {
std::cerr << "Warning: normalization parameters not loaded, using raw features" << std::endl;
return features;
}
}

} // namespace ipl
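
And the predictor that owned it composed the model handler with the two normalizers; a usage sketch against the removed API (paths and feature values are illustrative):

WirelengthPredictor predictor;
std::vector<float> features = {/* raw per-net features, illustrative */};
if (predictor.loadModel("./model/wirelength.onnx")                        // hypothetical
    && predictor.loadWirelengthNormalizationParams("./model/norm.json")) {  // hypothetical
  float wl = predictor.predictWirelength(features);  // returns -1.0f on failure
}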

+ 7
- 0
src/operation/iPL/source/module/detail_placer/CMakeLists.txt View File

@@ -37,4 +37,11 @@ target_link_libraries(ipl-module-detail_placer
ipl_module_evaluator_wirelength
)

if(ENABLE_AI)
target_link_libraries(ipl-module-detail_placer
PUBLIC
ipl_predictor_wirelen
)
endif()

target_include_directories(ipl-module-detail_placer PUBLIC ${iPL_DP})

+ 17
- 56
src/operation/iPL/source/module/detail_placer/DetailPlacer.cc View File

@@ -18,14 +18,14 @@

#include "module/evaluator/density/Density.hh"
#include "module/evaluator/wirelength/HPWirelength.hh"
#ifdef BUILD_AI_PREDICTOR
#include "module/evaluator/wirelength/AIWirelength.hh"
#ifdef ENABLE_AI
#include "ai_wirelength.hh"
#endif
#include "operation/BinOpt.hh"
#include "operation/InstanceSwap.hh"
#include "operation/LocalReorder.hh"
#include "operation/RowOpt.hh"
#include "operation/NFSpread.hh"
#include "operation/RowOpt.hh"
#include "usage/usage.hh"
#include "utility/Utility.hh"

@@ -39,12 +39,6 @@ DetailPlacer::DetailPlacer(Config* pl_config, PlacerDB* placer_db)

initDPDatabase(placer_db);
_operator.initDPOperator(&_database, &_config);

#ifdef BUILD_AI_PREDICTOR
// Initialize AI wirelength evaluator
_ai_wirelength_evaluator = std::make_unique<AIWirelength>(_operator.get_topo_manager());
_use_ai_wirelength = false;
#endif
}

DetailPlacer::~DetailPlacer()
@@ -106,8 +100,9 @@ void DetailPlacer::wrapRowList()
DPRow* row = new DPRow(pl_row->get_name(), row_site, pl_row->get_site_num());
row->set_coordinate(row_shift_x, row_shift_y);
row->set_orient(std::move(pl_site->get_orient()));
Rectangle<int64_t> rect(row_shift_x, row_shift_y, row_shift_x + pl_row->get_site_num() * row_site->get_width(), row_shift_y + row_site->get_height());

Rectangle<int64_t> rect(row_shift_x, row_shift_y, row_shift_x + pl_row->get_site_num() * row_site->get_width(),
row_shift_y + row_site->get_height());
row->set_bound(rect);

row_2d_list.at(row_index).push_back(row);
@@ -591,75 +586,41 @@ void DetailPlacer::runDetailPlaceNFS()
double time_delta = dp_status.elapsedRunTime();
LOG_INFO << "Detail Plaement Total Time Elapsed: " << time_delta << "s";
LOG_INFO << "-----------------Finish Network Flow Cell Spreading-----------------";

}

void DetailPlacer::notifyPLPlaceDensity()
{
auto* grid_manager = _operator.get_grid_manager();
PlacerDBInst.place_density[2] = grid_manager->obtainAvgGridDensity();
auto* grid_manager = _operator.get_grid_manager();
PlacerDBInst.place_density[2] = grid_manager->obtainAvgGridDensity();
}

int64_t DetailPlacer::calTotalHPWL()
{
#ifdef BUILD_AI_PREDICTOR
if (_use_ai_wirelength && _ai_wirelength_evaluator && _ai_wirelength_evaluator->isModelLoaded()) {
#ifdef ENABLE_AI
if (_use_ai_wirelength && aiPLWireLengthInst->isModelLoaded()) {
LOG_INFO << "Calculate Total Wirelength using AI model.";
return calTotalAIWirelength() + _database._outside_wl;
} else {
#endif
HPWirelength hpwl_eval(_operator.get_topo_manager());
return hpwl_eval.obtainTotalWirelength() + _database._outside_wl;
#ifdef BUILD_AI_PREDICTOR
#ifdef ENABLE_AI
}
#endif
}

#ifdef BUILD_AI_PREDICTOR
bool DetailPlacer::loadAIWirelengthModel(const std::string& model_path)
#ifdef ENABLE_AI
bool DetailPlacer::init_ai_wirelength_model(const std::string& model_path, const std::string& params_path)
{
if (_ai_wirelength_evaluator) {
bool success = _ai_wirelength_evaluator->loadModel(model_path);
if (success) {
LOG_INFO << "Successfully loaded AI wirelength model: " << model_path;
} else {
LOG_ERROR << "Failed to load AI wirelength model: " << model_path;
}
return success;
}
return false;
}
_use_ai_wirelength = aiPLWireLengthInst->init(model_path, params_path, _operator.get_topo_manager());

bool DetailPlacer::loadAIWirelengthNormalizationParams(const std::string& params_path)
{
if (_ai_wirelength_evaluator) {
bool success = _ai_wirelength_evaluator->loadNormalizationParams(params_path);
if (success) {
LOG_INFO << "Successfully loaded AI wirelength normalization parameters: " << params_path;
} else {
LOG_ERROR << "Failed to load AI wirelength normalization parameters: " << params_path;
}
return success;
}
return false;
}

void DetailPlacer::setUseAIWirelength(bool use_ai)
{
_use_ai_wirelength = use_ai;
if (_use_ai_wirelength) {
if (!_ai_wirelength_evaluator || !_ai_wirelength_evaluator->isModelLoaded()) {
LOG_WARNING << "AI wirelength model not loaded, falling back to HPWL";
_use_ai_wirelength = false;
}
}
LOG_INFO << "AI wirelength prediction " << (_use_ai_wirelength ? "enabled" : "disabled");
return _use_ai_wirelength;
}

int64_t DetailPlacer::calTotalAIWirelength()
{
if (_ai_wirelength_evaluator && _ai_wirelength_evaluator->isModelLoaded()) {
return _ai_wirelength_evaluator->obtainTotalWirelength();
if (_use_ai_wirelength && aiPLWireLengthInst->isModelLoaded()) {
return aiPLWireLengthInst->obtainTotalWirelength();
}
return 0;
}
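
The caller-visible effect of this refactor: the old load-model / load-params / set-flag trio collapses into one init call, matching how PLAPI now drives it. A sketch (paths illustrative):

DetailPlacer detail_place(PlacerDBInst.get_placer_config(), &PlacerDBInst);
// loads the ONNX model plus normalization params through the aiPLWireLengthInst
// singleton and returns whether AI wirelength prediction is actually usable
if (!detail_place.init_ai_wirelength_model("./m.onnx", "./norm.json")) {
  // calTotalHPWL() then falls back to the classic HPWL evaluator
}
detail_place.runDetailPlace();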


+ 5
- 9
src/operation/iPL/source/module/detail_placer/DetailPlacer.hh View File

@@ -32,12 +32,11 @@
#include "PlacerDB.hh"
#include "TopologyManager.hh"
#include "database/DPDatabase.hh"
#ifdef BUILD_AI_PREDICTOR
#include "AIWirelength.hh"
#endif

namespace ipl {

class AIWirelength;

class DetailPlacer
{
public:
@@ -57,11 +56,9 @@ class DetailPlacer

void runDetailPlaceNFS();

#ifdef BUILD_AI_PREDICTOR
#ifdef ENABLE_AI
// AI wirelength prediction methods
bool loadAIWirelengthModel(const std::string& model_path);
bool loadAIWirelengthNormalizationParams(const std::string& params_path);
void setUseAIWirelength(bool use_ai);
bool init_ai_wirelength_model(const std::string& model_path, const std::string& params_path);
int64_t calTotalAIWirelength();
#endif

@@ -69,8 +66,7 @@ class DetailPlacer
DPConfig _config;
DPDatabase _database;
DPOperator _operator;
#ifdef BUILD_AI_PREDICTOR
std::unique_ptr<AIWirelength> _ai_wirelength_evaluator;
#ifdef ENABLE_AI
bool _use_ai_wirelength = false;
#endif



+ 0
- 19
src/operation/iPL/source/module/evaluator/wirelength/CMakeLists.txt View File

@@ -3,14 +3,8 @@ set(WIRELENGTH_SOURCES
WAWirelengthGradient.cc
SteinerWirelength.cc
)

if(BUILD_AI_PREDICTOR)
list(APPEND WIRELENGTH_SOURCES AIWirelength.cc)
endif()

add_library(ipl_module_evaluator_wirelength ${WIRELENGTH_SOURCES})


target_link_libraries(ipl_module_evaluator_wirelength
PUBLIC
ipl-module-topology_manager
@@ -21,21 +15,8 @@ target_link_libraries(ipl_module_evaluator_wirelength
ipl-bridge
)

if(BUILD_AI_PREDICTOR)
target_link_libraries(ipl_module_evaluator_wirelength
PUBLIC
ipl-module-ai_predictor
)
endif()

target_include_directories(ipl_module_evaluator_wirelength
PUBLIC
${iPL_WIRELENGTH_EVALUATOR}
)

if(BUILD_AI_PREDICTOR)
target_include_directories(ipl_module_evaluator_wirelength
PUBLIC
${iPL_AI_PREDICTOR}/wirelength
)
endif()

+ 1
- 2
src/operation/iPL/source/module/evaluator/wirelength/Wirelength.hh View File

@@ -27,9 +27,8 @@
#ifndef IPL_EVALUATOR_WIRELENGTH_H
#define IPL_EVALUATOR_WIRELENGTH_H

#include "TopologyManager.hh"

namespace ipl {
class TopologyManager;

class Wirelength
{


+ 5
- 1
src/operation/iPL/source/module/topology_manager/CMakeLists.txt View File

@@ -5,4 +5,8 @@ target_link_libraries(ipl-module-topology_manager
ipl-module-logger
)

target_include_directories(ipl-module-topology_manager PUBLIC ${iPL_TOPOLOGY_MANAGER})
target_include_directories(ipl-module-topology_manager
PUBLIC
${iPL_TOPOLOGY_MANAGER}
${iPL_SOURCE}
)

+ 1
- 1
src/operation/iPL/source/module/topology_manager/TopologyManager.hh View File

@@ -104,7 +104,7 @@ class Node
// setter.
void set_node_id(int32_t id) { _node_id = id; }
void set_node_type(NODE_TYPE node_type) { _node_type = node_type; }
void set_is_io() {_io_flag = true;}
void set_is_io() { _io_flag = true; }
void set_location(Point<int32_t> location) { _location = std::move(location); }

void set_network(NetWork* network) { _network = network; }


+ 0
- 2
src/operation/iPNP/source/config/CMakeLists.txt View File

@@ -10,5 +10,3 @@ target_include_directories(pnp-config
PUBLIC
${CMAKE_CURRENT_SOURCE_DIR}
)

set(CMAKE_BUILD_TYPE "debug")

+ 0
- 2
src/operation/iPNP/source/module/synthesis/CMakeLists.txt View File

@@ -19,5 +19,3 @@ target_include_directories(pnp-synthesis
PUBLIC
${CMAKE_CURRENT_SOURCE_DIR}
)

set(CMAKE_BUILD_TYPE "debug")

+ 4
- 2
src/operation/iRT/interface/RTInterface.cpp View File

@@ -633,10 +633,12 @@ void RTInterface::wrapTrackAxis(RoutingLayer& routing_layer, idb::IdbLayerRoutin
ScaleAxis& track_axis = routing_layer.get_track_axis();

ScaleGrid x_track_grid;
x_track_grid.set_start_line(idb_layer->get_offset_x());
x_track_grid.set_step_length(idb_layer->get_pitch_x());
track_axis.get_x_grid_list().push_back(x_track_grid);

ScaleGrid y_track_grid;
y_track_grid.set_start_line(idb_layer->get_offset_y());
y_track_grid.set_step_length(idb_layer->get_pitch_y());
track_axis.get_y_grid_list().push_back(y_track_grid);
}
@@ -932,7 +934,7 @@ void RTInterface::wrapObstacleList()
}
}
}
// io pin
// io pin without net
for (idb::IdbPin* idb_io_pin : idb_io_pin_list) {
if (!isSkipping(idb_io_pin->get_net(), false)) {
continue;
@@ -1047,7 +1049,7 @@ void RTInterface::wrapObstacleList()
}
}
}
// io pin
// io pin without net
for (idb::IdbPin* idb_io_pin : idb_io_pin_list) {
if (!isSkipping(idb_io_pin->get_net(), false)) {
continue;


+ 42
- 9
src/operation/iRT/source/data_manager/DataManager.cpp View File

@@ -599,7 +599,28 @@ std::vector<NetShape> DataManager::getNetDetailedShapeList(int32_t net_idx, Laye

#endif

#if 1 // get the unique pitch
#if 1 // get the unique track

int32_t DataManager::getOnlyOffset()
{
std::vector<RoutingLayer>& routing_layer_list = _database.get_routing_layer_list();

std::vector<int32_t> offset_list;
for (RoutingLayer& routing_layer : routing_layer_list) {
for (ScaleGrid& x_grid : routing_layer.get_track_axis().get_x_grid_list()) {
offset_list.push_back(x_grid.get_start_line());
}
for (ScaleGrid& y_grid : routing_layer.get_track_axis().get_y_grid_list()) {
offset_list.push_back(y_grid.get_start_line());
}
}
for (int32_t offset : offset_list) {
if (offset_list.front() != offset) {
RTLOG.error(Loc::current(), "The offset is not equal!");
}
}
return offset_list.front();
}

int32_t DataManager::getOnlyPitch()
{
@@ -768,16 +789,27 @@ void DataManager::makeRoutingLayerList()
}
return frequent_num;
};
int32_t start_line;
{
std::vector<int32_t> start_line_list;
for (RoutingLayer& routing_layer : routing_layer_list) {
start_line_list.push_back(routing_layer.getPreferTrackGridList().front().get_start_line());
}
start_line = getFrequentNum(start_line_list);
}
int32_t step_length;
{
std::vector<int32_t> pitch_list;
std::vector<int32_t> step_length_list;
for (RoutingLayer& routing_layer : routing_layer_list) {
pitch_list.push_back(routing_layer.getPreferTrackGridList().front().get_step_length());
step_length_list.push_back(routing_layer.getPreferTrackGridList().front().get_step_length());
}
step_length = getFrequentNum(pitch_list);
step_length = getFrequentNum(step_length_list);
}
auto getScaleGrid = [](int32_t real_ll_scale, int32_t real_ur_scale, int32_t step_length) {
int32_t start_line = real_ll_scale + step_length;
auto getScaleGrid = [](int32_t real_ll_scale, int32_t real_ur_scale, int32_t start_line, int32_t step_length) {
while (start_line < step_length) {
start_line += step_length;
}
start_line += real_ll_scale;
int32_t step_num = (real_ur_scale - start_line) / step_length;
int32_t end_line = start_line + step_num * step_length;
if (end_line > real_ur_scale) {
@@ -797,8 +829,8 @@ void DataManager::makeRoutingLayerList()
};
ScaleAxis track_axis;
{
track_axis.get_x_grid_list().push_back(getScaleGrid(die.get_real_ll_x(), die.get_real_ur_x(), step_length));
track_axis.get_y_grid_list().push_back(getScaleGrid(die.get_real_ll_y(), die.get_real_ur_y(), step_length));
track_axis.get_x_grid_list().push_back(getScaleGrid(die.get_real_ll_x(), die.get_real_ur_x(), start_line, step_length));
track_axis.get_y_grid_list().push_back(getScaleGrid(die.get_real_ll_y(), die.get_real_ur_y(), start_line, step_length));
}
for (RoutingLayer& routing_layer : routing_layer_list) {
routing_layer.set_track_axis(track_axis);
@@ -951,6 +983,7 @@ std::vector<ScaleGrid> DataManager::makeGCellGridList(Direction direction)
Die& die = _database.get_die();
Row& row = _database.get_row();
int32_t row_height = row.get_height();
int32_t only_offset = getOnlyOffset();
int32_t only_pitch = getOnlyPitch();

int32_t die_start_scale = (direction == Direction::kVertical ? die.get_real_ll_x() : die.get_real_ll_y());
@@ -959,7 +992,7 @@ std::vector<ScaleGrid> DataManager::makeGCellGridList(Direction direction)

std::vector<int32_t> gcell_scale_list;
gcell_scale_list.push_back(die_start_scale);
for (int32_t gcell_scale = die_start_scale + (only_pitch / 2); gcell_scale <= die_end_scale; gcell_scale += step_length) {
for (int32_t gcell_scale = only_offset - (only_pitch / 2); gcell_scale <= die_end_scale; gcell_scale += step_length) {
gcell_scale_list.push_back(gcell_scale);
}
gcell_scale_list.push_back(die_end_scale);
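
A worked trace of the revised getScaleGrid shows why threading start_line (the shared track offset) through matters; all numbers below are made up:

// getScaleGrid(real_ll = 0, real_ur = 2000, start_line = 100, step_length = 400)
//   while (100 < 400)               -> start_line = 500
//   start_line += real_ll_scale (0) -> 500
//   step_num = (2000 - 500) / 400 = 3 -> end_line = 500 + 3 * 400 = 1700
// tracks fall on 500, 900, 1300, 1700 -- aligned to the shared offset, whereas
// the old real_ll_scale + step_length start ignored the offset entirely.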


+ 2
- 1
src/operation/iRT/source/data_manager/DataManager.hpp View File

@@ -68,7 +68,8 @@ class DataManager
std::vector<NetShape> getNetDetailedShapeList(int32_t net_idx, LayerCoord& first_coord, LayerCoord& second_coord);
#endif

#if 1 // get the unique pitch
#if 1 // get the unique result
int32_t getOnlyOffset();
int32_t getOnlyPitch();
#endif



+ 2
- 0
src/operation/iRT/source/module/detailed_router/DetailedRouter.cpp View File

@@ -3043,6 +3043,7 @@ void DetailedRouter::outputNetCSV(DRModel& dr_model)
}
RTUTIL.closeFileStream(net_csv_file);
}
RTLOG.info(Loc::current(), "The csv file has been saved");
}

void DetailedRouter::outputViolationCSV(DRModel& dr_model)
@@ -3078,6 +3079,7 @@ void DetailedRouter::outputViolationCSV(DRModel& dr_model)
}
RTUTIL.closeFileStream(violation_csv_file);
}
RTLOG.info(Loc::current(), "The csv file has been saved");
}

void DetailedRouter::outputJson(DRModel& dr_model)


+ 3
- 0
src/operation/iRT/source/module/layer_assigner/LayerAssigner.cpp View File

@@ -1022,6 +1022,7 @@ void LayerAssigner::outputGuide(LAModel& la_model)
}
}
RTUTIL.closeFileStream(guide_file_stream);
RTLOG.info(Loc::current(), "The csv file has been saved");
}

void LayerAssigner::outputNetCSV(LAModel& la_model)
@@ -1044,6 +1045,7 @@ void LayerAssigner::outputNetCSV(LAModel& la_model)
}
RTUTIL.closeFileStream(net_csv_file);
}
RTLOG.info(Loc::current(), "The csv file has been saved");
}

void LayerAssigner::outputOverflowCSV(LAModel& la_model)
@@ -1068,6 +1070,7 @@ void LayerAssigner::outputOverflowCSV(LAModel& la_model)
}
RTUTIL.closeFileStream(overflow_csv_file);
}
RTLOG.info(Loc::current(), "The csv file has been saved");
}

void LayerAssigner::outputJson(LAModel& la_model)


+ 2
- 0
src/operation/iRT/source/module/pin_accessor/PinAccessor.cpp View File

@@ -3462,6 +3462,7 @@ void PinAccessor::outputNetCSV(PAModel& pa_model)
}
RTUTIL.closeFileStream(net_csv_file);
}
RTLOG.info(Loc::current(), "The csv file has been saved");
}

void PinAccessor::outputViolationCSV(PAModel& pa_model)
@@ -3497,6 +3498,7 @@ void PinAccessor::outputViolationCSV(PAModel& pa_model)
}
RTUTIL.closeFileStream(violation_csv_file);
}
RTLOG.info(Loc::current(), "The csv file has been saved");
}

void PinAccessor::outputJson(PAModel& pa_model)


+ 3
- 0
src/operation/iRT/source/module/space_router/SpaceRouter.cpp View File

@@ -1764,6 +1764,7 @@ void SpaceRouter::outputGuide(SRModel& sr_model)
}
}
RTUTIL.closeFileStream(guide_file_stream);
RTLOG.info(Loc::current(), "The csv file has been saved");
}

void SpaceRouter::outputNetCSV(SRModel& sr_model)
@@ -1787,6 +1788,7 @@ void SpaceRouter::outputNetCSV(SRModel& sr_model)
}
RTUTIL.closeFileStream(net_csv_file);
}
RTLOG.info(Loc::current(), "The csv file has been saved");
}

void SpaceRouter::outputOverflowCSV(SRModel& sr_model)
@@ -1811,6 +1813,7 @@ void SpaceRouter::outputOverflowCSV(SRModel& sr_model)
}
RTUTIL.closeFileStream(overflow_csv_file);
}
RTLOG.info(Loc::current(), "The csv file has been saved");
}

void SpaceRouter::outputJson(SRModel& sr_model)


+ 2
- 0
src/operation/iRT/source/module/supply_analyzer/SupplyAnalyzer.cpp View File

@@ -456,6 +456,7 @@ void SupplyAnalyzer::outputPlanarSupplyCSV(SAModel& sa_model)
RTUTIL.pushStream(supply_csv_file, "\n");
}
RTUTIL.closeFileStream(supply_csv_file);
RTLOG.info(Loc::current(), "The csv file has been saved");
}

void SupplyAnalyzer::outputLayerSupplyCSV(SAModel& sa_model)
@@ -482,6 +483,7 @@ void SupplyAnalyzer::outputLayerSupplyCSV(SAModel& sa_model)
}
RTUTIL.closeFileStream(supply_csv_file);
}
RTLOG.info(Loc::current(), "The csv file has been saved");
}

#endif


+ 3
- 0
src/operation/iRT/source/module/topology_generator/TopologyGenerator.cpp View File

@@ -984,6 +984,7 @@ void TopologyGenerator::outputGuide(TGModel& tg_model)
}
}
RTUTIL.closeFileStream(guide_file_stream);
RTLOG.info(Loc::current(), "The csv file has been saved");
}

void TopologyGenerator::outputNetCSV(TGModel& tg_model)
@@ -1002,6 +1003,7 @@ void TopologyGenerator::outputNetCSV(TGModel& tg_model)
RTUTIL.pushStream(net_csv_file, "\n");
}
RTUTIL.closeFileStream(net_csv_file);
RTLOG.info(Loc::current(), "The csv file has been saved");
}

void TopologyGenerator::outputOverflowCSV(TGModel& tg_model)
@@ -1020,6 +1022,7 @@ void TopologyGenerator::outputOverflowCSV(TGModel& tg_model)
RTUTIL.pushStream(overflow_csv_file, "\n");
}
RTUTIL.closeFileStream(overflow_csv_file);
RTLOG.info(Loc::current(), "The csv file has been saved");
}

void TopologyGenerator::outputJson(TGModel& tg_model)


+ 2
- 0
src/operation/iRT/source/module/track_assigner/TrackAssigner.cpp View File

@@ -1483,6 +1483,7 @@ void TrackAssigner::outputNetCSV(TAModel& ta_model)
}
RTUTIL.closeFileStream(net_csv_file);
}
RTLOG.info(Loc::current(), "The csv file has been saved");
}

void TrackAssigner::outputViolationCSV(TAModel& ta_model)
@@ -1518,6 +1519,7 @@ void TrackAssigner::outputViolationCSV(TAModel& ta_model)
}
RTUTIL.closeFileStream(violation_csv_file);
}
RTLOG.info(Loc::current(), "The csv file has been saved");
}

void TrackAssigner::outputJson(TAModel& ta_model)


+ 2
- 0
src/operation/iRT/source/module/violation_reporter/ViolationReporter.cpp View File

@@ -470,6 +470,7 @@ void ViolationReporter::outputNetCSV(VRModel& vr_model)
}
RTUTIL.closeFileStream(net_csv_file);
}
RTLOG.info(Loc::current(), "The csv file has been saved");
}

void ViolationReporter::outputViolationCSV(VRModel& vr_model)
@@ -505,6 +506,7 @@ void ViolationReporter::outputViolationCSV(VRModel& vr_model)
}
RTUTIL.closeFileStream(violation_csv_file);
}
RTLOG.info(Loc::current(), "The csv file has been saved");
}

void ViolationReporter::outputJson(VRModel& vr_model)


+ 3
- 1
src/operation/iSTA/source/module/netlist/NetlistWriter.cc View File

@@ -100,7 +100,9 @@ void NetlistWriter::writePorts() {
}

const char *port_bus_name = port_bus->get_name();
fprintf(_stream, "%s", port_bus_name);
for (int i = 0; i < port_bus->get_size(); i++) {
fprintf(_stream, "%s[%d]\n", port_bus_name, i);
}
first = false;
}
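
With this loop in place, a hypothetical 4-bit port bus named data is now written bit by bit instead of as a single bus token; illustrative output:

// before:  data
// after:   data[0]
//          data[1]
//          data[2]
//          data[3]
// one fprintf(_stream, "%s[%d]\n", port_bus_name, i) per bit, driven by the
// PortBus::get_size() accessor exposed below.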



+ 1
- 1
src/operation/iSTA/source/module/netlist/Port.hh View File

@@ -115,7 +115,7 @@ class PortBus : public DesignObject {
auto& getPorts() { return _ports; }

std::string getFullName() override { return get_name(); }
unsigned get_size() { return _size; }
private:
unsigned _left; //!< The left range.
unsigned _right; //!< The right range.


+ 0
- 2
src/operation/iSTA/source/module/sta/CMakeLists.txt View File

@@ -32,5 +32,3 @@ target_link_libraries(sta
propagation-gpu
)
endif()

set(CMAKE_BUILD_TYPE "Debug")

+ 6
- 9
src/operation/iSTA/source/module/sta/Sta.cc View File

@@ -789,7 +789,9 @@ void Sta::linkDesignWithRustParser(const char *top_cell_name) {
auto *the_left_port = design_netlist.findPort(left_net_name.c_str());
auto *the_right_port = design_netlist.findPort(right_net_name.c_str());

if (the_left_net && the_right_net && !the_left_port && !the_right_port) {
if ((the_left_net && the_right_net && !the_left_port && !the_right_port) ||
(the_left_net && the_right_net && the_left_port && the_right_port)) {
// assign net = net; need to merge the two nets.
LOG_INFO << "merge " << left_net_name << " = " << right_net_name << "\n";

auto left_pin_ports = the_left_net->get_pin_ports();
@@ -834,11 +836,6 @@ void Sta::linkDesignWithRustParser(const char *top_cell_name) {
LOG_FATAL_IF(!the_left_port) << "the left port does not exist.";
created_net.addPinPort(the_left_port);

} else if (the_left_net && the_right_net && the_left_port &&
the_right_port) {
// assign output_port = output_port
LOG_FATAL_IF(!the_right_port) << "the right port is not exist.";
the_left_net->addPinPort(the_right_port);
} else {
LOG_FATAL << "assign " << left_net_name << " = " << right_net_name
<< " is not processed.";
@@ -3088,11 +3085,11 @@ unsigned Sta::reportTiming(std::set<std::string> &&exclude_cell_names /*= {}*/,
if (0) {
json graph_json;
StaDumpGraphJson dump_graph_json(graph_json);
auto& the_graph = get_graph();
auto &the_graph = get_graph();
dump_graph_json(&the_graph);

std::string graph_json_file_name =
Str::printf("%s/%s_graph.json", design_work_space, get_design_name().c_str());
std::string graph_json_file_name = Str::printf(
"%s/%s_graph.json", design_work_space, get_design_name().c_str());

std::ofstream out_file(graph_json_file_name);
if (out_file.is_open()) {
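
In Verilog terms, the widened condition makes both of these assign shapes take the net-merge path that used to handle only the first (names illustrative):

// assign n1 = n2;     -- both plain internal nets, neither a port (old behavior)
// assign out1 = out2; -- both nets also exposed as ports (previously a separate
//                        branch that only added the right port to the left net)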


+ 7
- 6
src/operation/iSTA/source/module/sta/StaDelayPropagation.cc View File

@@ -114,8 +114,13 @@ unsigned StaDelayPropagation::operator()(StaArc* the_arc) {

} else if (the_arc->isDelayArc()) {
auto* rc_net = getSta()->getRcNet(the_net);
auto load_pf = rc_net ? rc_net->load(analysis_mode, trans_type)
: the_net->getLoad(analysis_mode, trans_type);

auto out_trans_type = lib_arc->isNegativeArc()
? flip_trans_type(trans_type)
: trans_type;

auto load_pf = rc_net ? rc_net->load(analysis_mode, out_trans_type)
: the_net->getLoad(analysis_mode, out_trans_type);
auto* the_lib = lib_arc->get_owner_cell()->get_owner_lib();

double load{0};
@@ -125,10 +130,6 @@ unsigned StaDelayPropagation::operator()(StaArc* the_arc) {
load = load_pf;
}

auto out_trans_type = lib_arc->isNegativeArc()
? flip_trans_type(trans_type)
: trans_type;

// fix the timing type not match the trans type, which would lead to
// crash.
if (!lib_arc->isMatchTimingType(out_trans_type)) {
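
The reorder matters because a negative-unate arc inverts the edge: the load must be queried with the output transition, which is only known after the flip. A fragment-level illustration (an inverter-style arc, values hypothetical):

// rising input through a negative arc produces a falling output
TransType trans_type = TransType::kRise;
auto out_trans_type = flip_trans_type(trans_type);  // kFall
// correct: the load for the *output* edge drives the delay lookup
auto load_pf = rc_net->load(analysis_mode, out_trans_type);
// the old order queried load(analysis_mode, trans_type) first, pairing a
// falling output with the rising-edge load value.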


+ 3
- 3
src/operation/iSTA/source/module/sta/StaDump.cc View File

@@ -816,8 +816,8 @@ StaDumpGraphJson::json StaDumpGraphJson::dumpNodeFeature(StaGraph* the_graph) {
auto* the_obj = the_vertex->get_design_obj();
the_obj->isPort() ? one_node_feature_array.push_back(1.0) // is_port
: one_node_feature_array.push_back(0.0);
the_obj->isInput() ? one_node_feature_array.push_back(1.0) // is_input
: one_node_feature_array.push_back(0.0);
the_obj->isInput() ? one_node_feature_array.push_back(0.0) // is_input
: one_node_feature_array.push_back(1.0);
// the distance to the 4 die boundaries (left, right, top, bottom) TBD.
if (the_obj->get_coordinate()) {
auto [pin_x, pin_y] = the_obj->get_coordinate().value();
@@ -840,7 +840,7 @@ StaDumpGraphJson::json StaDumpGraphJson::dumpNodeFeature(StaGraph* the_graph) {
one_node_feature_array.push_back(die_height);
}

// TODO(to taosimin), min or max first? assume max first
// TODO(to taosimin), min or max first? assume min first
double max_rise_cap =
the_vertex->getLoad(AnalysisMode::kMax, TransType::kRise);
double max_fall_cap =


+ 13
- 14
src/operation/iSTA/source/module/sta/StaDump.hh View File

@@ -24,9 +24,9 @@
#pragma once

#include <yaml-cpp/yaml.h>
#include "json/json.hpp"

#include "StaFunc.hh"
#include "json/json.hpp"

namespace ista {

@@ -85,7 +85,7 @@ class StaDumpWireYaml : public StaDumpDelayYaml {
public:
StaDumpWireYaml(std::ofstream& file) : _file(file) {}
~StaDumpWireYaml() override = default;
void set_analysis_mode(AnalysisMode analysis_mode) {
_analysis_mode = analysis_mode;
}
@@ -97,21 +97,20 @@ class StaDumpWireYaml : public StaDumpDelayYaml {
unsigned operator()(StaVertex* the_vertex) override;
unsigned operator()(StaArc* the_arc) override;

private:
private:
std::ofstream& _file;
};

/**
* @brief The class for dump wire data in json text file for training data.
*
*
*/
class StaDumpWireJson : public StaDumpDelayYaml {
public:

using json = nlohmann::ordered_json;
StaDumpWireJson(json& parent_json) : _parent_json(parent_json) {}
~StaDumpWireJson() override = default;
~StaDumpWireJson() override = default;
void set_analysis_mode(AnalysisMode analysis_mode) {
_analysis_mode = analysis_mode;
}
@@ -119,11 +118,11 @@ class StaDumpWireJson : public StaDumpDelayYaml {

void set_trans_type(TransType trans_type) { _trans_type = trans_type; }
auto get_trans_type() { return _trans_type; }
unsigned operator()(StaVertex* the_vertex) override;
unsigned operator()(StaArc* the_arc) override;

private:
private:
json& _parent_json;
};

@@ -138,7 +137,7 @@ class StaDumpGraphViz : public StaFunc {

/**
* @brief The class for dump timing data in memory for python call.
*
*
*/
class StaDumpTimingData : public StaFunc {
public:
@@ -151,7 +150,7 @@ class StaDumpTimingData : public StaFunc {

auto get_wire_timing_datas() { return _wire_timing_datas; }

private:
private:
std::vector<StaWireTimingData> _wire_timing_datas;

AnalysisMode _analysis_mode;
@@ -160,17 +159,16 @@ class StaDumpTimingData : public StaFunc {

/**
* @brief dump the graph json for get graph timing data.
*
*
*/
class StaDumpGraphJson : public StaFunc {
public:
using json = nlohmann::ordered_json;
StaDumpGraphJson(json& json_file) : _json_file(json_file) {}
~StaDumpGraphJson() override = default;
~StaDumpGraphJson() override = default;

unsigned operator()(StaGraph* the_graph) override;

private:
json dumpEdges(StaGraph* the_graph);

json dumpNodeRAT(StaGraph* the_graph);
@@ -186,6 +184,7 @@ class StaDumpGraphJson : public StaFunc {
json dumpNetInArcFeature(StaGraph* the_graph);
json dumpNetOutArcFeature(StaGraph* the_graph);

private:
json& _json_file;
};



+ 32
- 0
src/utility/log/CMakeLists.txt View File

@@ -5,6 +5,38 @@ AUX_SOURCE_DIRECTORY(./ SRC)

SET(LINK_unwind "unwind")

find_package(glog QUIET)
message(STATUS "Detected glog version: ${glog_VERSION}")

if(NOT glog_FOUND)
# Fallback for glog 0.4.0 which doesn't provide CMake config files
find_package(PkgConfig REQUIRED)
pkg_check_modules(GLOG REQUIRED libglog)

# Create a target for glog if not found
if(NOT TARGET glog::glog)
add_library(glog::glog UNKNOWN IMPORTED)
set_target_properties(glog::glog PROPERTIES
IMPORTED_LOCATION "${GLOG_LIBRARIES}"
INTERFACE_INCLUDE_DIRECTORIES "${GLOG_INCLUDE_DIRS}"
INTERFACE_LINK_LIBRARIES "${GLOG_LIBRARIES}"
)
endif()

# Set version manually for glog 0.4.0
set(glog_VERSION "0.4.0" CACHE STRING "glog version" FORCE)
endif()

if(glog_VERSION VERSION_LESS_EQUAL "0.5.0")
# For glog versions 0.5.0 and earlier, the signature of SignalHandle is (const char*, int)
# We define GOOGLE_GLOG_VERSION to 50 to indicate a version like 0.5.0
add_compile_definitions(GOOGLE_GLOG_VERSION=50)
else()
# For glog 0.6.0 and later versions, the signature of SignalHandle is (const char*, size_t)
# We define GOOGLE_GLOG_VERSION to 60 to indicate a version like 0.6.0
add_compile_definitions(GOOGLE_GLOG_VERSION=60)
endif()

# Define GLOG_USE_GLOG_EXPORT for glog 0.7.1+ compatibility
add_definitions(-DGLOG_USE_GLOG_EXPORT)



+ 14
- 1
src/utility/log/Log.cc View File

@@ -33,7 +33,6 @@

using std::string;


namespace ieda {

bool Log::_is_init = false;
@@ -44,7 +43,13 @@ bool Log::_is_init = false;
* @param data
* @param size
*/
#if defined(GOOGLE_GLOG_VERSION) && GOOGLE_GLOG_VERSION < 60
// For glog versions before 0.6.0
void SignalHandle(const char* data, int size)
#else
// For glog 0.6.0 and later versions
void SignalHandle(const char* data, std::size_t size)
#endif
{
std::ofstream fs("glog_dump.log", std::ios::app);
std::string str = std::string(data, size);
@@ -67,6 +72,14 @@ void Log::init(char* argv[], std::string log_dir)
end();
}

#if defined(GOOGLE_GLOG_VERSION) && GOOGLE_GLOG_VERSION < 60
// For glog 0.5.0 and earlier, do nothing
#else
// For glog 0.6.0 and later versions, log to stdout
FLAGS_logtostdout = true;
FLAGS_colorlogtostdout = true;
#endif

/*init google logging.*/
google::InitGoogleLogging(argv[0]);
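
For context on why the signature matters: glog hands crash output to the callback installed with google::InstallFailureWriter, and that callback's second parameter changed from int to size_t in glog 0.6.0. Registration presumably happens later in Log::init, along these lines (not shown in this hunk):

google::InstallFailureSignalHandler();       // catch fatal signals (SIGSEGV, ...)
google::InstallFailureWriter(&SignalHandle); // SignalHandle must match the
                                             // signature glog was built with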



+ 3
- 5
src/utility/memory/MemoryMonitor.cc View File

@@ -6,7 +6,6 @@
#include <unistd.h>
#include <sstream>

// initialize the static member
std::mutex MemoryMonitor::file_mutex_;

MemoryMonitor::MemoryMonitor(const std::string& label, const std::string& logFile)
@@ -16,7 +15,6 @@ MemoryMonitor::MemoryMonitor(const std::string& label, const std::string& logFil
start_vm_ = getCurrentVirtual();
start_time_ = std::chrono::high_resolution_clock::now();
// open the log file (append mode)
std::lock_guard<std::mutex> lock(file_mutex_);
std::ofstream log(logFile_, std::ios::app);
if (log.is_open()) {
@@ -88,7 +86,7 @@ size_t MemoryMonitor::getCurrentRSS() {
statm >> vm >> rss;
statm.close();
}
return rss * sysconf(_SC_PAGESIZE); // convert page count to bytes
return rss * sysconf(_SC_PAGESIZE);
}

size_t MemoryMonitor::getCurrentVirtual() {
@@ -98,7 +96,7 @@ size_t MemoryMonitor::getCurrentVirtual() {
statm >> vm;
statm.close();
}
return vm * sysconf(_SC_PAGESIZE); // convert page count to bytes
return vm * sysconf(_SC_PAGESIZE);
}

size_t MemoryMonitor::getPeakRSS() {
@@ -112,7 +110,7 @@ size_t MemoryMonitor::getPeakRSS() {
std::istringstream iss(line.substr(10));
size_t kb;
iss >> kb;
peak_rss = kb * 1024; // convert KB to bytes
peak_rss = kb * 1024;
break;
}
}


+ 2
- 12
src/utility/memory/MemoryMonitor.hh View File

@@ -7,29 +7,19 @@

class MemoryMonitor {
public:
// constructor: specify the label and the output file name
MemoryMonitor(const std::string& label, const std::string& logFile = "memory_monitor.log");
// destructor: automatically reports memory usage
~MemoryMonitor();
// manually report memory usage
void report();
// get the current timestamp
static std::string getTimeStamp();
private:
// get the current process's physical memory usage (RSS), in bytes
size_t getCurrentRSS();
// get the current process's virtual memory usage, in bytes

size_t getCurrentVirtual();
// get the process's peak physical memory usage
size_t getPeakRSS();
// format a memory size for display
std::string formatMemory(size_t bytes);
std::string label_;
@@ -37,7 +27,7 @@ private:
size_t start_memory_;
size_t start_vm_;
std::chrono::high_resolution_clock::time_point start_time_;
static std::mutex file_mutex_; // ensure thread safety
static std::mutex file_mutex_;
};

#endif // MEMORY_MONITOR_H
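
Even with the comments trimmed, the class stays RAII-style: construct to snapshot, destruct to report. A usage sketch (label and scope illustrative):

{
  MemoryMonitor monitor("detail_place");  // appends to memory_monitor.log by default
  // ... run the phase being measured ...
  monitor.report();                       // optional mid-scope reading
}                                         // destructor reports the final delta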

+ 2
- 2
src/vectorization/api/vec_api.cpp View File

@@ -47,11 +47,11 @@ std::map<int, VecNet> VectorizationApi::getGraph(std::string path)
return vectorization.getGraph(path);
}

bool VectorizationApi::buildVectorizationFeature(const std::string dir, int patch_row_step, int patch_col_step)
bool VectorizationApi::buildVectorizationFeature(const std::string dir, int patch_row_step, int patch_col_step, bool batch_mode)
{
Vectorization vectorization;

vectorization.buildFeature(dir, patch_row_step, patch_col_step);
vectorization.buildFeature(dir, patch_row_step, patch_col_step, batch_mode);

return true;
}


+ 1
- 1
src/vectorization/api/vec_api.h View File

@@ -29,7 +29,7 @@ class VectorizationApi

bool buildVectorizationLayoutData(const std::string path);
bool buildVectorizationGraphData(const std::string path);
bool buildVectorizationFeature(const std::string dir, int patch_row_step, int patch_col_step);
bool buildVectorizationFeature(const std::string dir, int patch_row_step, int patch_col_step, bool batch_mode = true);

// run the vectorization sta for get timing data.
bool runVecSTA(const std::string dir = "VEC_STA");
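
A caller sketch for the new flag (directory and step values illustrative): the default batch_mode = true keeps the existing many-items-per-file layout, while false switches to one JSON file per net/patch:

VectorizationApi api;
api.buildVectorizationFeature("./vec_out", 9, 9);                        // net_0_999.json, ...
api.buildVectorizationFeature("./vec_out", 9, 9, /*batch_mode=*/false);  // net_<id>.json, ...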


+ 2
- 2
src/vectorization/src/data_manager/vec_dm.cpp View File

@@ -78,10 +78,10 @@ std::map<int, VecNet> VecDataManager::getGraph(std::string path)
return layout_dm.get_graph();
}

void VecDataManager::saveData(const std::string dir)
void VecDataManager::saveData(const std::string dir, bool batch_mode)
{
VecLayoutFileIO file_io(dir, &layout_dm.get_layout(), &patch_dm->get_patch_grid());
file_io.saveJson();
file_io.saveJson(batch_mode);
}

bool VecDataManager::readNetsToIDB(std::string dir)


+ 1
- 1
src/vectorization/src/data_manager/vec_dm.h View File

@@ -43,7 +43,7 @@ class VecDataManager
std::map<int, VecNet> getGraph(std::string path);

bool checkData();
void saveData(const std::string dir);
void saveData(const std::string dir, bool batch_mode = true);
bool readNetsToIDB(std::string dir);
bool readNetsPatternToIDB(std::string path);



+ 127
- 80
src/vectorization/src/data_manager/vec_file.cpp View File

@@ -40,9 +40,10 @@ void VecLayoutFileIO::makeDir(std::string dir)
}
}

bool VecLayoutFileIO::saveJson()
bool VecLayoutFileIO::saveJson(bool batch_mode)
{
LOG_INFO << "Vectorization save json start... dir = " << _path;
LOG_INFO << "Batch mode: " << (batch_mode ? "enabled (multiple items per file)" : "disabled (single item per file)");

makeDir(_path);

@@ -56,38 +57,35 @@ bool VecLayoutFileIO::saveJson()
saveJsonInstances();

/// save graph
saveJsonNets();
saveJsonNets(batch_mode);

/// save patch
saveJsonPatchs();
saveJsonPatchs(batch_mode);

LOG_INFO << "Vectorization save json end... dir = " << _path;

return true;
}

bool VecLayoutFileIO::saveJsonNets()
bool VecLayoutFileIO::saveJsonNets(bool batch_mode)
{
ieda::Stats stats;
LOG_INFO << "Vectorization save json net start...";
makeDir(_path + "/nets/");

auto& net_map = _layout->get_graph().get_net_map();
const int BATCH_SIZE = 1500; // 可根据系统性能调整批量大小
const int BATCH_SIZE = 1500; // adjustable batch size based on system performance
const int num_threads = omp_get_max_threads();
const int NETS_PER_FILE = 1000; // 每个文件存储的net数量
const int NETS_PER_FILE = 1000; // each file stores this many nets

// 预先将map的键值对复制到vector中,避免O(N^2)的迭代复杂度
// Pre-copy the map's key-value pairs into a vector to avoid O(N²) iteration complexity
std::vector<std::pair<int, VecNet*>> net_vec;
net_vec.reserve(net_map.size());
for (auto& [net_id, vec_net] : net_map) {
net_vec.emplace_back(net_id, &vec_net);
}

// compute the number of files needed
int num_files = (net_vec.size() + NETS_PER_FILE - 1) / NETS_PER_FILE;

// 用于收集所有线程生成的JSON数据
// Collect JSON data generated by all threads
std::vector<std::vector<std::pair<int, json>>> thread_batches(num_threads);

int total = 0;
@@ -95,11 +93,10 @@ bool VecLayoutFileIO::saveJsonNets()
{
int thread_id = omp_get_thread_num();
auto& local_batch = thread_batches[thread_id];
local_batch.reserve(BATCH_SIZE + 100); // 预分配空间
local_batch.reserve(BATCH_SIZE + 100); // preallocate space

#pragma omp for schedule(dynamic, 100) reduction(+ : total)
for (int i = 0; i < (int) net_vec.size(); ++i) {
// direct O(1) access into the vector instead of O(i) std::advance
const auto& [net_id, vec_net_ptr] = net_vec[i];
auto& vec_net = *vec_net_ptr;
auto* idb_net = dmInst->get_idb_design()->get_net_list()->get_net_list()[net_id];
@@ -265,7 +262,7 @@ bool VecLayoutFileIO::saveJsonNets()
json_net["routing_graph"] = json_routing_graph;
}

// 将结果添加到本地批次中,存储net_id和对应的json数据
// Add the result to the local batch, storing net_id and corresponding json data
local_batch.emplace_back(net_id, std::move(json_net));

if (i % 1000 == 0) {
@@ -277,10 +274,10 @@ bool VecLayoutFileIO::saveJsonNets()
}
}

// 并行区域结束后,合并所有线程的结果
// After the parallel region, merge the results of all threads
LOG_INFO << "JSON generation completed, merging results...";

// 创建一个映射,将net_id映射到对应的json数据
// Create a mapping from net_id to corresponding json data
std::map<int, json> all_nets;
for (const auto& batch : thread_batches) {
for (const auto& [net_id, json_data] : batch) {
@@ -288,41 +285,67 @@ bool VecLayoutFileIO::saveJsonNets()
}
}

// write files in batches
LOG_INFO << "Starting batch file writing...";
// compute the number of files to write
int total_files = (all_nets.size() + NETS_PER_FILE - 1) / NETS_PER_FILE;
if (batch_mode) {
// batch file mode: multiple nets per file
LOG_INFO << "Starting batch file writing...";
int total_files = (all_nets.size() + NETS_PER_FILE - 1) / NETS_PER_FILE;

#pragma omp parallel for schedule(dynamic, 1) num_threads(std::min(num_threads, 8))
for (int file_idx = 0; file_idx < total_files; ++file_idx) {
// compute the range of nets contained in the current file
int start_net_idx = file_idx * NETS_PER_FILE;
int end_net_idx = std::min((file_idx + 1) * NETS_PER_FILE - 1, (int) all_nets.size() - 1);
for (int file_idx = 0; file_idx < total_files; ++file_idx) {
int start_net_idx = file_idx * NETS_PER_FILE;
int end_net_idx = std::min((file_idx + 1) * NETS_PER_FILE - 1, (int) all_nets.size() - 1);

// 创建文件名格式: net_START_END.json
std::string filename = "net_" + std::to_string(start_net_idx) + "_" + std::to_string(end_net_idx) + ".json";
std::string full_path = _path + "/nets/" + filename;
// create file name format: net_start_end.json
std::string filename = "net_" + std::to_string(start_net_idx) + "_" + std::to_string(end_net_idx) + ".json";
std::string full_path = _path + "/nets/" + filename;

// create an array containing the current batch of nets
json batch_json = json::array();
json batch_json = json::array();

// find all nets within this range
auto it = all_nets.begin();
std::advance(it, start_net_idx);
auto it = all_nets.begin();
std::advance(it, start_net_idx);

for (int i = start_net_idx; i <= end_net_idx && it != all_nets.end(); ++i, ++it) {
batch_json.push_back(it->second);
}
for (int i = start_net_idx; i <= end_net_idx && it != all_nets.end(); ++i, ++it) {
batch_json.push_back(it->second);
}

std::ofstream file_stream(full_path);
// file_stream << std::setw(4) << batch_json;
file_stream << batch_json;
file_stream.close();
std::ofstream file_stream(full_path);
file_stream << batch_json;
file_stream.close();

#pragma omp critical(log)
{
LOG_INFO << "Writing files: " << (file_idx + 1) * NETS_PER_FILE << " / " << all_nets.size();
{
LOG_INFO << "Writing files: " << (file_idx + 1) * NETS_PER_FILE << " / " << all_nets.size();
}
}
} else {
// individual file mode: one net per file
LOG_INFO << "Starting individual file writing...";
// Convert map to vector for OpenMP parallel processing
std::vector<std::pair<int, json>> net_list;
for (const auto& [net_id, json_data] : all_nets) {
net_list.emplace_back(net_id, json_data);
}

#pragma omp parallel for schedule(dynamic, 10) num_threads(std::min(num_threads, 8))
for (int i = 0; i < (int)net_list.size(); ++i) {
const auto& [net_id, json_data] = net_list[i];
// create file name format: net_id.json
std::string filename = "net_" + std::to_string(net_id) + ".json";
std::string full_path = _path + "/nets/" + filename;
std::ofstream file_stream(full_path);
file_stream << std::setw(4) << json_data;
file_stream.close();
if ((i + 1) % 1000 == 0 || i == (int)net_list.size() - 1) {
#pragma omp critical(log)
{
LOG_INFO << "Writing individual files: " << (i + 1) << " / " << net_list.size();
}
}
}
}

@@ -335,7 +358,7 @@ bool VecLayoutFileIO::saveJsonNets()
return true;
}

bool VecLayoutFileIO::saveJsonPatchs()
bool VecLayoutFileIO::saveJsonPatchs(bool batch_mode)
{
ieda::Stats stats;
LOG_INFO << "Vectorization save json patchs start...";
@@ -346,28 +369,23 @@ bool VecLayoutFileIO::saveJsonPatchs()
}

auto& patchs = _patch_grid->get_patchs();
const int BATCH_SIZE = 1500; // 可根据系统性能调整批量大小
const int BATCH_SIZE = 1500; // adjustable batch size based on system performance
const int num_threads = omp_get_max_threads();
const int PATCHS_PER_FILE = 1000; // 每个文件存储的patch数量
const int PATCHS_PER_FILE = 1000; // each file stores this many patches

// Pre-copy the map's key-value pairs into a vector to avoid O(N²) iteration complexity
std::vector<std::pair<int, VecPatch*>> patch_vec;
patch_vec.reserve(patchs.size());
for (auto& [patch_id, patch] : patchs) {
patch_vec.emplace_back(patch_id, &patch);
}

// compute the number of files needed
int num_files = (patch_vec.size() + PATCHS_PER_FILE - 1) / PATCHS_PER_FILE;

// Collect JSON data generated by all threads
std::vector<std::vector<std::pair<int, json>>> thread_batches(num_threads);

#pragma omp parallel
{
int thread_id = omp_get_thread_num();
auto& local_batch = thread_batches[thread_id];
local_batch.reserve(BATCH_SIZE + 100); // preallocate space
local_batch.reserve(BATCH_SIZE + 100);

#pragma omp for schedule(dynamic, 100)
for (int i = 0; i < (int) patch_vec.size(); ++i) {
@@ -509,7 +527,7 @@ bool VecLayoutFileIO::saveJsonPatchs()
json_patch["patch_layer"] = json_layers;
}

// 将结果添加到本地批次中,存储patch_id和对应的json数据
// Add the result to the local batch, storing patch_id and corresponding json data
local_batch.emplace_back(patch_id, std::move(json_patch));

if (i % 1000 == 0) {
@@ -521,10 +539,8 @@ bool VecLayoutFileIO::saveJsonPatchs()
}
}

// After the parallel region, merge the results of all threads
LOG_INFO << "JSON generation completed, merging results...";

// Create a mapping from patch_id to the corresponding json data
std::map<int, json> all_patches;
for (const auto& batch : thread_batches) {
for (const auto& [patch_id, json_data] : batch) {
@@ -532,40 +548,64 @@ bool VecLayoutFileIO::saveJsonPatchs()
}
}

// write files in batches
LOG_INFO << "Starting batch file writing...";

// compute the number of files to write
int total_files = (all_patches.size() + PATCHS_PER_FILE - 1) / PATCHS_PER_FILE;
if (batch_mode) {
LOG_INFO << "Starting batch file writing...";
int total_files = (all_patches.size() + PATCHS_PER_FILE - 1) / PATCHS_PER_FILE;

#pragma omp parallel for schedule(dynamic, 1) num_threads(std::min(num_threads, 8))
for (int file_idx = 0; file_idx < total_files; ++file_idx) {
// compute the range of patches contained in the current file
int start_patch_idx = file_idx * PATCHS_PER_FILE;
int end_patch_idx = std::min((file_idx + 1) * PATCHS_PER_FILE - 1, (int) all_patches.size() - 1);
for (int file_idx = 0; file_idx < total_files; ++file_idx) {
int start_patch_idx = file_idx * PATCHS_PER_FILE;
int end_patch_idx = std::min((file_idx + 1) * PATCHS_PER_FILE - 1, (int) all_patches.size() - 1);

// create a file name format: patch_start_end.json
std::string filename = "patch_" + std::to_string(start_patch_idx) + "_" + std::to_string(end_patch_idx) + ".json";
std::string full_path = _path + "/patchs/" + filename;

// 创建文件名格式: patch_START_END.json
std::string filename = "patch_" + std::to_string(start_patch_idx) + "_" + std::to_string(end_patch_idx) + ".json";
std::string full_path = _path + "/patchs/" + filename;
json batch_json = json::array();

// 创建一个包含当前批次patch的数组
json batch_json = json::array();
auto it = all_patches.begin();
std::advance(it, start_patch_idx);

for (int i = start_patch_idx; i <= end_patch_idx && it != all_patches.end(); ++i, ++it) {
batch_json.push_back(it->second);
}

// 找到这个范围内的所有patch
auto it = all_patches.begin();
std::advance(it, start_patch_idx);
std::ofstream file_stream(full_path);
file_stream << batch_json;
file_stream.close();

for (int i = start_patch_idx; i <= end_patch_idx && it != all_patches.end(); ++i, ++it) {
batch_json.push_back(it->second);
#pragma omp critical(log)
{
LOG_INFO << "Writing files: " << (file_idx + 1) * PATCHS_PER_FILE << " / " << all_patches.size();
}
}
} else {
LOG_INFO << "Starting individual file writing...";
std::vector<std::pair<int, json>> patch_list;
for (const auto& [patch_id, json_data] : all_patches) {
patch_list.emplace_back(patch_id, json_data);
}

std::ofstream file_stream(full_path);
file_stream << batch_json;
file_stream.close();
#pragma omp parallel for schedule(dynamic, 10) num_threads(std::min(num_threads, 8))
for (int i = 0; i < (int) patch_list.size(); ++i) {
const auto& [patch_id, json_data] = patch_list[i];

// create file name format: patch_id.json
std::string filename = "patch_" + std::to_string(patch_id) + ".json";
std::string full_path = _path + "/patchs/" + filename;

std::ofstream file_stream(full_path);
file_stream << std::setw(4) << json_data;
file_stream.close();

#pragma omp critical(log)
{
LOG_INFO << "Writing files: " << (file_idx + 1) * PATCHS_PER_FILE << " / " << all_patches.size();
{
if ((i + 1) % 100 == 0 || i == (int) patch_list.size() - 1) {
LOG_INFO << "Writing individual files: " << (i + 1) << " / " << patch_list.size();
}
}
}
}
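
In batch mode the patchs/ directory therefore holds one patch_START_END.json per 1000 patches, each containing a JSON array; in individual mode it holds one patch_<id>.json object per patch. A hedged reader sketch that accepts either layout, assuming nlohmann::json and C++17 <filesystem>; only the array-vs-object distinction is taken from the code above, everything else is illustrative:

#include <filesystem>
#include <fstream>
#include <utility>
#include <vector>

#include <nlohmann/json.hpp>
using json = nlohmann::json;
namespace fs = std::filesystem;

// Load every patch written under `dir`, whichever mode produced it:
// batch files hold a JSON array of patches, individual files one object.
std::vector<json> loadPatches(const fs::path& dir)
{
  std::vector<json> patches;
  for (const auto& entry : fs::directory_iterator(dir)) {
    if (entry.path().extension() != ".json") {
      continue;
    }
    json j;
    std::ifstream in(entry.path());
    in >> j;
    if (j.is_array()) {
      for (auto& p : j) {
        patches.push_back(std::move(p));
      }
    } else {
      patches.push_back(std::move(j));
    }
  }
  return patches;
}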

@@ -950,8 +990,11 @@ bool VecLayoutFileIO::readJsonNetsPattern()
}

std::string layer_metal = _layout->findLayerName(layer_index_start);
if (layer_metal == "") {
continue;
}

auto* idb_layer_metal = idb_layers->find_layer(layer_metal);
auto* idb_segment = idb_wire->add_segment();

idb_segment->set_layer(idb_layer_metal);
@@ -968,8 +1011,12 @@ bool VecLayoutFileIO::readJsonNetsPattern()

for (auto layer_order = bottom_order; layer_order <= top_order; layer_order += 2) {
std::string bottom_layer_name = _layout->findLayerName(layer_order);
std::string top_layer_name = _layout->findLayerName(layer_order + 2);
if (bottom_layer_name == "" || top_layer_name == "") {
continue;
}

auto* bottom_layer = idb_layers->find_layer(bottom_layer_name);
auto* top_layer = idb_layers->find_layer(top_layer_name);

auto* idb_segment = idb_wire->add_segment();
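
Both hunks add the same guard: findLayerName returns an empty string for a layer order it cannot resolve, and the code now skips that segment instead of handing the empty name to find_layer and dereferencing a null layer later. A minimal sketch of the guard factored into a helper; the struct declarations are stand-ins for the real iEDA types, not their actual definitions:

#include <string>

// Stand-ins for the API shapes used in the diff; assumptions, not the
// real iEDA declarations.
struct IdbLayer;
struct IdbLayers {
  IdbLayer* find_layer(const std::string& name);  // may return nullptr
};
struct VecLayout {
  std::string findLayerName(int layer_order);  // "" when the order is illegal
};

// Resolve a layer order to a layer pointer, or nullptr when the order has
// no valid layer name; the caller then continues over that segment.
IdbLayer* resolveLayer(VecLayout* layout, IdbLayers* layers, int layer_order)
{
  std::string name = layout->findLayerName(layer_order);
  if (name.empty()) {
    return nullptr;
  }
  return layers->find_layer(name);
}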


+3 -3  src/vectorization/src/data_manager/vec_file.h

@@ -38,7 +38,7 @@ class VecLayoutFileIO
}
~VecLayoutFileIO() {}

  bool saveJson(bool batch_mode = true);
bool readJsonNets();
bool readJsonNetsPattern();

@@ -47,8 +47,8 @@ class VecLayoutFileIO
VecLayout* _layout = nullptr;
VecPatchGrid* _patch_grid = nullptr;

  bool saveJsonNets(bool batch_mode = true);
  bool saveJsonPatchs(bool batch_mode = true);
bool saveJsonTech();
bool saveJsonCells();
bool saveJsonInstances();


+2 -2  src/vectorization/src/vectorization.cpp

@@ -70,7 +70,7 @@ std::map<int, VecNet> Vectorization::getGraph(std::string path)
return _data_manager.getGraph(path);
}

void Vectorization::buildFeature(const std::string dir, int patch_row_step, int patch_col_step, bool batch_mode)
{
/// build layout data
@@ -100,7 +100,7 @@ void Vectorization::buildFeature(const std::string dir, int patch_row_step, int
generateFeature(dir);

/// save
  _data_manager.saveData(dir, batch_mode);
}

void Vectorization::generateFeature(const std::string dir)
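
Unlike the file-IO layer, buildFeature takes batch_mode without a default, so its callers must now pass the flag explicitly. A hypothetical call, with placeholder step values:

Vectorization vec;
vec.buildFeature("./feature_out", /*patch_row_step=*/9, /*patch_col_step=*/9,
                 /*batch_mode=*/true);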


+1 -1  src/vectorization/src/vectorization.h

@@ -32,7 +32,7 @@ class Vectorization
bool buildGraphData(const std::string path);
bool buildGraphDataWithoutSave(const std::string path);
std::map<int, VecNet> getGraph(std::string path);
  void buildFeature(const std::string dir, int patch_row_step, int patch_col_step, bool batch_mode);
bool buildPatchData(const std::string dir);
bool buildPatchData(const std::string dir, int patch_row_step, int patch_col_step);


