37 Commits

Author SHA1 Message Date
  Emin fd2edab9b8 !75 monthly merge 3 months ago
  YihangQiu 4ec18fcce0 merge 3 months ago
  YihangQiu 61332e4fc5 fix: Disable AI inference by default in iPL 3 months ago
  simintao 048c279c87 Merge branch 'master' of gitee.com:ieda-ipd/iEDA 3 months ago
  simintao a53e8d1f90 feature:support dump graph json for timing predict contest 3 months ago
  Yell-walkalone 184b30b67c Merge branch 'master' of gitee.com:ieda-ipd/iEDA 3 months ago
  YihangQiu 9e3870c1fc Merge branch 'master' of gitee.com:ieda-ipd/iEDA 3 months ago
  YihangQiu 55a90919b5 fix: ista.cmake 3 months ago
  Yell-walkalone 30661fc003 Merge branch 'master' of gitee.com:ieda-ipd/iEDA 3 months ago
  simintao e0a39c9a0e feature:support dump pin loca 3 months ago
  Yell-walkalone 9cbcbc7915 Merge branch 'master' of gitee.com:ieda-ipd/iEDA 3 months ago
  simintao b81ce39cd5 fix:build 3 months ago
  Yell-walkalone 7d153907d8 Merge branch 'master' of gitee.com:ieda-ipd/iEDA 3 months ago
  Yell-walkalone 38dcf25a51 add nets pattern to idb 3 months ago
  simintao 5dd22a968f feature:support dump net delay and inst delay 3 months ago
  simintao 82c97b5a43 Merge branch 'master' of gitee.com:ieda-ipd/iEDA 3 months ago
  simintao ef934a2ffb feature:support dump timint predict feature 3 months ago
  Yell-walkalone 6921d2891a Merge branch 'master' of gitee.com:ieda-ipd/iEDA 3 months ago
  Yell-walkalone 56e792a4fe add vectors nets transform to idb 3 months ago
  YihangQiu f993356116 update 3 months ago
  YihangQiu 8d87b66433 merge 3 months ago
  YihangQiu d5d155cd7f feat: AI-based placement optimization demo 3 months ago
  simintao 71bec79442 fix:spef use $ character 3 months ago
  simintao 2829c4d7ea refactor:support order json 3 months ago
  simintao 1d17ea8ae2 refactor:mask print 3 months ago
  simintao 2ab8fc55a4 feature:support lib arc set find value 3 months ago
  simintao ac8597ba10 feature:print ir data in log 3 months ago
  simintao 8d6eceb84a feature:support print run time in rpt 3 months ago
  simintao 1dd369323a fix:cap calc first route id 3 months ago
  simintao 116b4d6874 fix:pin port name 4 months ago
  simintao 56b5bfefb1 feature:add log 4 months ago
  simintao 0a853bcbc3 Merge branch 'master' of gitee.com:ieda-ipd/iEDA 4 months ago
  simintao 48d22ba1fa feature:support get power for wire power 4 months ago
  Emin d571ea50c4 !79 fix(build): enable position independent code for shared libraries 4 months ago
  Emin a21612dd2c !80 build: set default build type to Release if not defined 4 months ago
  simintao 3917fd0beb Merge branch 'master' of gitee.com:ieda-ipd/iEDA 4 months ago
  simintao 6ff89db3f8 refactor:remove unused code 4 months ago
83 changed files with 2646 additions and 165 deletions
  1. CMakeLists.txt (+8, -1)
  2. src/database/data/design/db_design/IdbNet.h (+1, -1)
  3. src/database/data/design/db_design/IdbVias.cpp (+8, -3)
  4. src/database/manager/parser/liberty/Lib.cc (+24, -0)
  5. src/database/manager/parser/liberty/Lib.hh (+3, -0)
  6. src/database/manager/parser/spef/spef-parser/src/spef_parser/grammar/spef.pest (+1, -1)
  7. src/interface/python/py_ipl/py_ipl.cpp (+7, -0)
  8. src/interface/python/py_ipl/py_ipl.h (+1, -0)
  9. src/interface/python/py_ipl/py_register_ipl.h (+1, -0)
  10. src/interface/python/py_ipw/py_ipw.cpp (+89, -26)
  11. src/interface/python/py_ipw/py_ipw.h (+18, -1)
  12. src/interface/python/py_ipw/py_register_ipw.h (+16, -2)
  13. src/interface/python/py_ista/py_ista.cpp (+4, -4)
  14. src/interface/python/py_ista/py_ista.h (+2, -2)
  15. src/interface/python/py_ista/py_register_ista.h (+2, -2)
  16. src/interface/python/py_vec/CMakeLists.txt (+8, -0)
  17. src/interface/python/py_vec/py_register_vec.h (+2, -0)
  18. src/interface/python/py_vec/py_vec.cpp (+18, -0)
  19. src/interface/python/py_vec/py_vec.h (+2, -0)
  20. src/interface/tcl/tcl_vec/tcl_register_vec.h (+2, -0)
  21. src/interface/tcl/tcl_vec/tcl_vec.cpp (+73, -5)
  22. src/interface/tcl/tcl_vec/tcl_vec.h (+28, -0)
  23. src/operation/iPA/api/Power.cc (+2, -0)
  24. src/operation/iPL/api/PLAPI.cc (+76, -0)
  25. src/operation/iPL/api/PLAPI.hh (+4, -0)
  26. src/operation/iPL/source/module/CMakeLists.txt (+14, -3)
  27. src/operation/iPL/source/module/ai_predictor/CMakeLists.txt (+13, -0)
  28. src/operation/iPL/source/module/ai_predictor/wirelength/CMakeLists.txt (+17, -0)
  29. src/operation/iPL/source/module/ai_predictor/wirelength/normalization_handler.cc (+143, -0)
  30. src/operation/iPL/source/module/ai_predictor/wirelength/normalization_handler.hh (+63, -0)
  31. src/operation/iPL/source/module/ai_predictor/wirelength/onnx_model_handler.cc (+210, -0)
  32. src/operation/iPL/source/module/ai_predictor/wirelength/onnx_model_handler.hh (+60, -0)
  33. src/operation/iPL/source/module/ai_predictor/wirelength/wirelength_predictor.cc (+146, -0)
  34. src/operation/iPL/source/module/ai_predictor/wirelength/wirelength_predictor.hh (+64, -0)
  35. src/operation/iPL/source/module/detail_placer/CMakeLists.txt (+1, -0)
  36. src/operation/iPL/source/module/detail_placer/DetailPlacer.cc (+70, -2)
  37. src/operation/iPL/source/module/detail_placer/DetailPlacer.hh (+15, -0)
  38. src/operation/iPL/source/module/evaluator/wirelength/AIWirelength.cc (+147, -0)
  39. src/operation/iPL/source/module/evaluator/wirelength/AIWirelength.hh (+78, -0)
  40. src/operation/iPL/source/module/evaluator/wirelength/CMakeLists.txt (+26, -3)
  41. src/operation/iSTA/api/TimingIDBAdapter.cc (+17, -10)
  42. src/operation/iSTA/api/TimingIDBAdapter.hh (+1, -1)
  43. src/operation/iSTA/source/module/delay/ElmoreDelayCalc.cc (+24, -0)
  44. src/operation/iSTA/source/module/delay/ElmoreDelayCalc.hh (+2, -0)
  45. src/operation/iSTA/source/module/netlist/DesignObject.hh (+6, -0)
  46. src/operation/iSTA/source/module/netlist/Instance.hh (+2, -4)
  47. src/operation/iSTA/source/module/netlist/Netlist.hh (+9, -9)
  48. src/operation/iSTA/source/module/netlist/Pin.hh (+5, -0)
  49. src/operation/iSTA/source/module/netlist/Port.cc (+3, -1)
  50. src/operation/iSTA/source/module/netlist/Port.hh (+6, -1)
  51. src/operation/iSTA/source/module/python-api/PythonSta.cc (+1, -1)
  52. src/operation/iSTA/source/module/python-api/PythonSta.hh (+4, -4)
  53. src/operation/iSTA/source/module/sta/Sta.cc (+26, -1)
  54. src/operation/iSTA/source/module/sta/Sta.hh (+20, -18)
  55. src/operation/iSTA/source/module/sta/StaArc.cc (+4, -5)
  56. src/operation/iSTA/source/module/sta/StaArc.hh (+5, -3)
  57. src/operation/iSTA/source/module/sta/StaBuildGraph.cc (+4, -3)
  58. src/operation/iSTA/source/module/sta/StaDataSlewDelayPropagation.cc (+9, -2)
  59. src/operation/iSTA/source/module/sta/StaDelayPropagation.cc (+11, -3)
  60. src/operation/iSTA/source/module/sta/StaDump.cc (+512, -0)
  61. src/operation/iSTA/source/module/sta/StaDump.hh (+31, -0)
  62. src/operation/iSTA/source/module/sta/StaReport.cc (+13, -14)
  63. src/operation/iSTA/source/module/sta/StaReport.hh (+2, -0)
  64. src/operation/iSTA/source/module/sta/StaVertex.cc (+1, -1)
  65. src/platform/tool_manager/tool_api/ipl_io/ipl_io.cpp (+20, -0)
  66. src/platform/tool_manager/tool_api/ipl_io/ipl_io.h (+1, -0)
  67. src/platform/tool_manager/tool_manager.cpp (+6, -0)
  68. src/platform/tool_manager/tool_manager.h (+1, -0)
  69. src/third_party/CMakeLists.txt (+6, -0)
  70. src/vectorization/api/vec_api.cpp (+17, -1)
  71. src/vectorization/api/vec_api.h (+3, -0)
  72. src/vectorization/database/vec_net.h (+3, -0)
  73. src/vectorization/database/vec_node.h (+10, -7)
  74. src/vectorization/src/data_manager/CMakeLists.txt (+2, -0)
  75. src/vectorization/src/data_manager/vec_dm.cpp (+12, -0)
  76. src/vectorization/src/data_manager/vec_dm.h (+2, -0)
  77. src/vectorization/src/data_manager/vec_file.cpp (+337, -12)
  78. src/vectorization/src/data_manager/vec_file.h (+6, -3)
  79. src/vectorization/src/layout/data_manager/vec_layout_init.cpp (+9, -2)
  80. src/vectorization/src/layout/data_manager/vec_layout_init.h (+1, -1)
  81. src/vectorization/src/layout/database/vec_layout.h (+2, -1)
  82. src/vectorization/src/vectorization.cpp (+20, -0)
  83. src/vectorization/src/vectorization.h (+3, -1)

+ 8
- 1
CMakeLists.txt

@@ -23,7 +23,10 @@ set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib)
set(CMAKE_CXX_FLAGS_RELEASE "$ENV{CXXFLAGS} -O3 -Wall -Werror=return-type")
set(CMAKE_CXX_FLAGS_DEBUG "$ENV{CXXFLAGS} -O0 -Wall -g2 -ggdb -Werror=return-type")
set(CMAKE_BUILD_TYPE "Release")

if(NOT DEFINED CMAKE_BUILD_TYPE OR CMAKE_BUILD_TYPE STREQUAL "")
set(CMAKE_BUILD_TYPE "Release" CACHE STRING "Build type (default: Release)" FORCE)
endif()

if(DEFINED BUILD_AIEDA)
option(BUILD_STATIC_LIB "Build static library OFF" OFF)
@@ -36,6 +39,7 @@ option(USE_PROFILER "Enable performance profiling (default OFF)" OFF)
option(SANITIZER "Enable address sanitizer (default OFF)" OFF)
option(BUILD_GUI "Enable GUI components (default OFF)" OFF)
option(USE_GPU "Enable GPU acceleration (default OFF)" OFF)
option(BUILD_AI_PREDICTOR "Enable AI predictor modules (default OFF)" OFF)
option(COMPATIBILITY_MODE "Enable compatibility mode (disable aggressive optimizations)" ON)

# Define GLOG_USE_GLOG_EXPORT for glog 0.7.1+ compatibility
@@ -63,6 +67,9 @@ if (BUILD_STATIC_LIB)
set(BUILD_SHARED_LIBS OFF CACHE BOOL "Disable shared libs" FORCE)
set(CMAKE_POSITION_INDEPENDENT_CODE OFF)
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -static-libstdc++ -static-libgcc")
else()
# When building shared libraries, ensure position independent code
set(CMAKE_POSITION_INDEPENDENT_CODE ON)
endif()

if(NOT COMPATIBILITY_MODE)


+ 1
- 1
src/database/data/design/db_design/IdbNet.h

@@ -192,7 +192,7 @@ class IdbNet : public IdbObject
int32_t _xtalk;
// NONDEFAULTRULE
IdbInstanceType _source_type;
IdbConnectType _connect_type;
IdbConnectType _connect_type = IdbConnectType::kSignal;
bool _fix_bump;
double _frequency;
IdbCoordinate<int32_t>* _average_coordinate;


+ 8
- 3
src/database/data/design/db_design/IdbVias.cpp

@@ -310,9 +310,14 @@ IdbVia* IdbVias::createVia(string via_name, IdbLayerCut* layer_cut, int32_t widt
if (via_rule == nullptr)
return nullptr;

std::pair<int32_t, int32_t> row_col_pair = calculateRowsCols(layer_cut, width_design, height_design);
int32_t rows = row_col_pair.first;
int32_t cols = row_col_pair.second;
int32_t rows = 1;
int32_t cols = 1;

if (width_design != 0 && height_design != 0) {
std::pair<int32_t, int32_t> row_col_pair = calculateRowsCols(layer_cut, width_design, height_design);
rows = row_col_pair.first;
cols = row_col_pair.second;
}

IdbLayerRouting* layer_bottom = via_rule->get_layer_bottom();
IdbLayerRouting* layer_top = via_rule->get_layer_top();


+ 24
- 0
src/database/manager/parser/liberty/Lib.cc

@@ -1316,6 +1316,30 @@ LibArcSet& LibArcSet::operator=(LibArcSet&& rhs) noexcept
return *this;
}

/**
* @brief get delay or constrain arc set value, should contain value vec.
*
* @param trans_type
* @param slew
* @param load_or_constrain_slew
* @return std::vector<double>
*/
std::vector<double> LibArcSet::getDelayOrConstrainCheckNs(TransType trans_type, double slew, double load_or_constrain_slew) {
std::vector<double> values;
// LOG_INFO_IF_EVERY_N(_arcs.size() > 1, 100) << "arc set size is " << _arcs.size();

for (auto& lib_arc : _arcs) {
double find_value = lib_arc->getDelayOrConstrainCheckNs(trans_type, slew, load_or_constrain_slew);
values.push_back(find_value);
}

// sort by descending.
std::ranges::sort(values, std::greater<double>());

return values;

}

LibPowerArc::LibPowerArc() : _owner_cell(nullptr)
{
}
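
A minimal usage sketch (not part of this patch) for the new LibArcSet::getDelayOrConstrainCheckNs. It assumes the snippet lives in the same namespace as LibArcSet (qualifiers omitted because the namespace is not visible in this excerpt); the slew/load values are placeholders. As implemented above, the call returns one value per arc in the set, sorted in descending order.

#include <iostream>
#include <vector>

#include "Lib.hh"

// Query every arc of an arc set at a single (slew, load) point and print the
// per-arc values; results arrive largest-first because of the descending sort.
void print_arc_values(LibArcSet& arc_set, TransType trans_type)
{
  std::vector<double> values = arc_set.getDelayOrConstrainCheckNs(trans_type, /*slew=*/0.05, /*load_or_constrain_slew=*/0.02);
  for (double value : values) {
    std::cout << value << std::endl;
  }
}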


+ 3
- 0
src/database/manager/parser/liberty/Lib.hh

@@ -392,6 +392,7 @@ class LibDelayTableModel final : public LibTableModel
}

LibTable* getTable(int index) override { return _tables[index].get(); }
auto& get_tables() { return _tables; }

unsigned addCurrentTable(std::unique_ptr<LibCCSTable>&& table)
{
@@ -911,6 +912,8 @@ class LibArcSet
LibArc* front() { return _arcs.front().get(); }
auto& get_arcs() { return _arcs; }

std::vector<double> getDelayOrConstrainCheckNs(TransType trans_type, double slew, double load_or_constrain_slew);

private:
Vector<std::unique_ptr<LibArc>> _arcs;



+ 1
- 1
src/database/manager/parser/spef/spef-parser/src/spef_parser/grammar/spef.pest

@@ -10,7 +10,7 @@ multiline_comment = _{ "/*" ~ (!"*/" ~ ANY)* ~ "*/" }
COMMENT = _{ line_comment | multiline_comment }

header_char = _{ ASCII_ALPHANUMERIC | "_" | "\\" | "/" | "[" | "]" | "," | "\"" | "-" | ":" | "." }
char = _{ ASCII_ALPHANUMERIC | "_" | "\\" | "/" | "[" | "]" | "," | "\"" }
char = _{ ASCII_ALPHANUMERIC | "_" | "\\" | "/" | "[" | "]" | "," | "\"" | "$" }

section = ${ "*" ~ section_name }
section_name = ${ "NAME_MAP" | "PORTS" | "CONN" | "CAP" | "RES" | "END" }


+ 7
- 0
src/interface/python/py_ipl/py_ipl.cpp

@@ -60,6 +60,13 @@ bool placerReport()
return run_ok;
}

bool placerAiRun(const std::string& config, const std::string& onnx_path, const std::string& normalization_path)
{
bool run_ok = iplf::tmInst->runAiPlacer(config, onnx_path, normalization_path);
return run_ok;
}


bool placerInit(const std::string& config)
{
auto* inst = iplf::PlacerIO::getInstance();


+ 1
- 0
src/interface/python/py_ipl/py_ipl.h

@@ -24,6 +24,7 @@ bool placerIncrementalFlow(const std::string& config);
bool placerIncrementalLG();
bool placerCheckLegality();
bool placerReport();
bool placerAiRun(const std::string& config, const std::string& onnx_path, const std::string& normalization_path);

bool placerInit(const std::string& config);
bool placerDestroy();


+ 1
- 0
src/interface/python/py_ipl/py_register_ipl.h

@@ -28,6 +28,7 @@ void register_ipl(py::module& m)
m.def("run_filler", placerRunFiller, py::arg("config"));
m.def("run_incremental_flow", placerIncrementalFlow, py::arg("config"));
m.def("run_incremental_lg", placerIncrementalLG);
m.def("run_ai_placement", placerAiRun, py::arg("config"), py::arg("onnx_path"), py::arg("normalization_path"));

m.def("init_pl", placerInit, py::arg("config"));
m.def("destroy_pl", placerDestroy);


+ 89
- 26
src/interface/python/py_ipw/py_ipw.cpp

@@ -23,28 +23,21 @@
#include "sta/Sta.hh"

namespace python_interface {
bool readRustVCD(const char* vcd_path, const char* top_instance_name) {
bool readRustVCD(const char* vcd_path, const char* top_instance_name)
{
ista::Sta* ista = ista::Sta::getOrCreateSta();
ipower::Power* ipower = ipower::Power::getOrCreatePower(&(ista->get_graph()));

return ipower->readRustVCD(vcd_path, top_instance_name);
}

unsigned reportPower() {
Sta* ista = Sta::getOrCreateSta();
ipower::Power* ipower = ipower::Power::getOrCreatePower(&(ista->get_graph()));

ipower->runCompleteFlow();

return 1;
}

/**
* @brief interface for python of report power.
*
* @return unsigned
*/
unsigned report_power() {
unsigned report_power()
{
ista::Sta* ista = ista::Sta::getOrCreateSta();
ipower::Power* ipower = ipower::Power::getOrCreatePower(&(ista->get_graph()));

@@ -54,12 +47,13 @@ unsigned report_power() {

/**
* @brief interface for python of read pg spef.
*
* @param pg_spef_file
* @return true
* @return false
*
* @param pg_spef_file
* @return true
* @return false
*/
bool read_pg_spef(std::string pg_spef_file) {
bool read_pg_spef(std::string pg_spef_file)
{
ista::Sta* ista = ista::Sta::getOrCreateSta();
ipower::Power* ipower = ipower::Power::getOrCreatePower(&(ista->get_graph()));

@@ -68,11 +62,12 @@ bool read_pg_spef(std::string pg_spef_file) {

/**
* @brief interface for python of report ir drop.
*
* @param power_net_name
* @return unsigned
*
* @param power_net_name
* @return unsigned
*/
unsigned report_ir_drop(std::vector<std::string> power_nets) {
unsigned report_ir_drop(std::vector<std::string> power_nets)
{
auto* power_engine = ipower::PowerEngine::getOrCreatePowerEngine();

for (auto power_net_name : power_nets) {
@@ -84,20 +79,21 @@ unsigned report_ir_drop(std::vector<std::string> power_nets) {
return 1;
}

unsigned create_data_flow() {
unsigned create_data_flow()
{
auto* power_engine = ipower::PowerEngine::getOrCreatePowerEngine();
return power_engine->creatDataflow();
}

std::map<std::size_t, std::vector<ipower::ClusterConnection>>
build_connection_map(std::vector<std::set<std::string>> clusters, std::set<std::string> src_instances,
unsigned max_hop) {
std::map<std::size_t, std::vector<ipower::ClusterConnection>> build_connection_map(std::vector<std::set<std::string>> clusters,
std::set<std::string> src_instances, unsigned max_hop)
{
auto* power_engine = ipower::PowerEngine::getOrCreatePowerEngine();
return power_engine->buildConnectionMap(clusters, src_instances, max_hop);
}

std::vector<ipower::MacroConnection> build_macro_connection_map(unsigned max_hop) {
std::vector<ipower::MacroConnection> build_macro_connection_map(unsigned max_hop)
{
auto* power_engine = ipower::PowerEngine::getOrCreatePowerEngine();
#ifdef USE_GPU
return power_engine->buildMacroConnectionMapWithGPU(max_hop);
@@ -106,4 +102,71 @@ std::vector<ipower::MacroConnection> build_macro_connection_map(unsigned max_hop
#endif
}

std::vector<PathWireTimingPowerData> get_wire_timing_power_data(unsigned n_worst_path_per_clock)
{
auto* ista = ista::Sta::getOrCreateSta();
auto path_wire_timing_data = ista->reportTimingData(n_worst_path_per_clock);
auto* power_engine = ipower::PowerEngine::getOrCreatePowerEngine();
auto get_net_name = [](const std::string& pin_port_name) {
auto* ista = ista::Sta::getOrCreateSta();
auto objs = ista->get_netlist()->findObj(pin_port_name.c_str(), false, false);
LOG_FATAL_IF(objs.size() != 1);

auto* pin_or_port = objs[0];
std::string net_name = pin_or_port->get_net()->get_name();

return net_name;
};

std::vector<PathWireTimingPowerData> ret_timing_data;
std::string net_name;
double net_toggle = 0.0;
double vdd = 0.0;
for (auto& one_path_wire_timing_data : path_wire_timing_data) {
PathWireTimingPowerData ret_one_path_data;
for (auto& wire_timing_data : one_path_wire_timing_data) {
WireTimingPowerData ret_wire_data;
ret_wire_data._from_node_name = std::move(wire_timing_data._from_node_name);
ret_wire_data._to_node_name = std::move(wire_timing_data._to_node_name);
ret_wire_data._wire_resistance = wire_timing_data._wire_resistance;
ret_wire_data._wire_capacitance = wire_timing_data._wire_capacitance;
ret_wire_data._wire_delay = wire_timing_data._wire_delay;
ret_wire_data._wire_from_slew = wire_timing_data._wire_from_slew;
ret_wire_data._wire_to_slew = wire_timing_data._wire_to_slew;

// for power
std::string& pin_port_name = ret_wire_data._from_node_name;
auto pin_port_name_vec = Str::split(pin_port_name.c_str(), ":");
bool is_pin_port = true;
// judge if the pin_port_name is pin or port.
if (pin_port_name_vec.size() == 2) {
std::string second_name = pin_port_name_vec[1];
if (std::isdigit(second_name[0])) {
is_pin_port = false;
}
}
// update toggle and vdd
if (is_pin_port) {
// LOG_INFO << "update toggle and vdd for pin: " << pin_port_name;
net_name = get_net_name(pin_port_name);
auto [toggle, voltage] = power_engine->get_power()->getNetToggleAndVoltageData(net_name.c_str());
net_toggle = toggle;
vdd = voltage;
}

// calculate wire power
ret_wire_data._wire_power = 0.5 * net_toggle * vdd * wire_timing_data._wire_capacitance;

ret_one_path_data.emplace_back(std::move(ret_wire_data));
}

ret_timing_data.emplace_back(std::move(ret_one_path_data));
}

LOG_INFO << "get wire timing power data size: " << ret_timing_data.size();

return ret_timing_data;
}

} // namespace python_interface
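
A standalone sketch restating the wire power expression used in get_wire_timing_power_data above (wire_power = 0.5 * net_toggle * vdd * wire_capacitance); the numeric inputs are placeholders purely for illustration.

#include <iostream>

// Mirrors the per-segment expression applied to each wire above.
double wire_switching_power(double net_toggle, double vdd, double wire_capacitance)
{
  return 0.5 * net_toggle * vdd * wire_capacitance;
}

int main()
{
  // Placeholder toggle rate, supply voltage, and segment capacitance.
  double power = wire_switching_power(/*net_toggle=*/0.2, /*vdd=*/0.8, /*wire_capacitance=*/1.5e-15);
  std::cout << "wire power: " << power << std::endl;
  return 0;
}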

+ 18
- 1
src/interface/python/py_ipw/py_ipw.h

@@ -23,10 +23,25 @@
#include "api/PowerEngine.hh"

namespace python_interface {

struct WireTimingPowerData
{
std::string _from_node_name;
std::string _to_node_name;
double _wire_resistance;
double _wire_capacitance;
double _wire_from_slew;
double _wire_to_slew;
double _wire_delay;
double _wire_power;
};

using PathWireTimingPowerData = std::vector<WireTimingPowerData>;

bool readRustVCD(const char* vcd_path, const char* top_instance_name);
bool read_pg_spef(std::string pg_spef_file);

unsigned reportPower();
unsigned report_power();
unsigned report_ir_drop(std::vector<std::string> power_nets);

// for dataflow.
@@ -38,4 +53,6 @@ build_connection_map(std::vector<std::set<std::string>> clusters,

std::vector<ipower::MacroConnection> build_macro_connection_map(unsigned max_hop);

std::vector<PathWireTimingPowerData> get_wire_timing_power_data(unsigned n_worst_path_per_clock);

} // namespace python_interface

+ 16
- 2
src/interface/python/py_ipw/py_register_ipw.h

@@ -30,7 +30,8 @@ void register_ipw(py::module& m)
{
m.def("read_vcd_cpp", &readRustVCD, py::arg("file_name"), py::arg("top_name"));
m.def("read_pg_spef", &read_pg_spef, py::arg("pg_spef_file"));
m.def("report_power_cpp", &reportPower);
m.def("report_power_cpp", &report_power);
m.def("report_power", &report_power);
m.def("report_ir_drop", &report_ir_drop, py::arg("power_nets"));

// for dataflow.
@@ -43,13 +44,26 @@ void register_ipw(py::module& m)
m.def("build_connection_map", &build_connection_map);


py::class_<ipower::MacroConnection>(m, "MacroConnection")
py::class_<ipower::MacroConnection>(m, "MacroConnection")
.def_readwrite("src_macro_name", &ipower::MacroConnection::_src_macro_name)
.def_readwrite("dst_macro_name", &ipower::MacroConnection::_dst_macro_name)
.def_readwrite("stages_each_hop", &ipower::MacroConnection::_stages_each_hop)
.def_readwrite("hop", &ipower::MacroConnection::_hop);
m.def("build_macro_connection_map", &build_macro_connection_map);

// get wire timing data
py::class_<WireTimingPowerData>(m, "WireTimingPowerData")
.def_readwrite("from_node_name", &WireTimingPowerData::_from_node_name)
.def_readwrite("to_node_name", &WireTimingPowerData::_to_node_name)
.def_readwrite("wire_resistance", &WireTimingPowerData::_wire_resistance)
.def_readwrite("wire_capacitance", &WireTimingPowerData::_wire_capacitance)
.def_readwrite("wire_from_slew", &WireTimingPowerData::_wire_from_slew)
.def_readwrite("wire_to_slew", &WireTimingPowerData::_wire_to_slew)
.def_readwrite("wire_delay", &WireTimingPowerData::_wire_delay)
.def_readwrite("wire_power", &WireTimingPowerData::_wire_power);

m.def("get_wire_timing_power_data", get_wire_timing_power_data);

}

}

+ 4
- 4
src/interface/python/py_ista/py_ista.cpp

@@ -106,18 +106,18 @@ std::string getNetName(const std::string& pin_port_name)
return net_name;
}

double getSegmentResistance(int layer_id, double segment_length) {
double getSegmentResistance(int layer_id, double segment_length, int route_layer_id) {
auto* timing_engine = ista::TimingEngine::getOrCreateTimingEngine();
auto* idb_adapter = dynamic_cast<ista::TimingIDBAdapter*>(timing_engine->get_db_adapter());
double resistance = idb_adapter->getResistance(layer_id, segment_length, std::nullopt);
double resistance = idb_adapter->getResistance(layer_id, segment_length, std::nullopt, route_layer_id);

return resistance;
}

double getSegmentCapacitance(int layer_id, double segment_length) {
double getSegmentCapacitance(int layer_id, double segment_length, int route_layer_id) {
auto* timing_engine = ista::TimingEngine::getOrCreateTimingEngine();
auto* idb_adapter = dynamic_cast<ista::TimingIDBAdapter*>(timing_engine->get_db_adapter());
double capacitance = idb_adapter->getCapacitance(layer_id, segment_length, std::nullopt);
double capacitance = idb_adapter->getCapacitance(layer_id, segment_length, std::nullopt, route_layer_id);

return capacitance;
}


+ 2
- 2
src/interface/python/py_ista/py_ista.h

@@ -58,8 +58,8 @@ bool readSdc(const std::string& file_name);

std::string getNetName(const std::string& pin_port_name);

double getSegmentResistance(int layer_id, double segment_length);
double getSegmentCapacitance(int layer_id, double segment_length);
double getSegmentResistance(int layer_id, double segment_length, int route_layer_id);
double getSegmentCapacitance(int layer_id, double segment_length, int route_layer_id);

std::string makeRCTreeInnerNode(const std::string& net_name, int id, float cap);
std::string makeRCTreeObjNode(const std::string& pin_port_name, float cap);


+ 2
- 2
src/interface/python/py_ista/py_register_ista.h

@@ -38,8 +38,8 @@ void register_ista(py::module& m)
m.def("read_sdc", readSdc, py::arg("file_name"));

m.def("get_net_name", getNetName, py::arg("pin_port_name"));
m.def("get_segment_capacitance", getSegmentCapacitance, py::arg("layer_id"), py::arg("segment_length"));
m.def("get_segment_resistance", getSegmentResistance, py::arg("layer_id"), py::arg("segment_length"));
m.def("get_segment_capacitance", getSegmentCapacitance, py::arg("layer_id"), py::arg("segment_length"), py::arg("route_layer_id"));
m.def("get_segment_resistance", getSegmentResistance, py::arg("layer_id"), py::arg("segment_length"), py::arg("route_layer_id"));
m.def("make_rc_tree_inner_node", makeRCTreeInnerNode, py::arg("net_name"), py::arg("id"), py::arg("cap"));
m.def("make_rc_tree_obj_node", makeRCTreeObjNode, py::arg("pin_port_name"), py::arg("cap"));


+ 8
- 0
src/interface/python/py_vec/CMakeLists.txt

@@ -1,7 +1,15 @@
set(CMAKE_BUILD_TYPE Debug)


aux_source_directory(. PY_VEC_SRC)

add_library(py_vec ${PY_VEC_SRC})

target_compile_options(py_vec PRIVATE
$<$<CONFIG:Debug>:-g -O0>
)


target_link_libraries(py_vec
PUBLIC
ivec_api


+ 2
- 0
src/interface/python/py_vec/py_register_vec.h

@@ -27,6 +27,8 @@ void register_vectorization(py::module& m)
m.def("layout_patchs", layout_patchs, py::arg("path"));
m.def("layout_graph", layout_graph, py::arg("path"));
m.def("generate_vectors", generate_vectors, py::arg("dir"), py::arg("patch_row_step") = 9, py::arg("patch_col_step") = 9);
m.def("read_vectors_nets", read_vectors_nets, py::arg("dir"));
m.def("read_vectors_nets_patterns", read_vectors_nets_patterns, py::arg("path"));

py::class_<ieval::TimingWireNode>(m, "TimingWireNode")
.def_readwrite("name", &ieval::TimingWireNode::_name)


+ 18
- 0
src/interface/python/py_vec/py_vec.cpp

@@ -82,4 +82,22 @@ ieval::TimingInstanceGraph get_timing_instance_graph(std::string instance_graph_
return timing_instance_graph;
}

bool read_vectors_nets(std::string dir)
{
if (dir == "") {
return false;
}
ivec::VectorizationApi lm_api;
return lm_api.readVectorsNets(dir);
}

bool read_vectors_nets_patterns(std::string path)
{
if (path == "") {
return false;
}
ivec::VectorizationApi lm_api;
return lm_api.readVectorsNetsPatterns(path);
}

} // namespace python_interface
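
A sketch of calling the vectorization API directly, mirroring what the Python and Tcl wrappers above do. The header name and both paths are assumptions (the Tcl command above defaults its directory to "./vectors/nets").

#include "vec_api.h"  // assumed header exposing ivec::VectorizationApi

int main()
{
  ivec::VectorizationApi vec_api;
  // Placeholder locations for previously generated net vectors and patterns.
  bool nets_ok = vec_api.readVectorsNets("./vectors/nets");
  bool patterns_ok = vec_api.readVectorsNetsPatterns("./vectors/nets_patterns.json");
  return (nets_ok && patterns_ok) ? 0 : 1;
}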

+ 2
- 0
src/interface/python/py_vec/py_vec.h

@@ -25,6 +25,8 @@ namespace python_interface {
bool layout_patchs(const std::string& path);
bool layout_graph(const std::string& path);
bool generate_vectors(std::string dir, int patch_row_step, int patch_col_step);
bool read_vectors_nets(std::string dir);
bool read_vectors_nets_patterns(std::string path);

// for vectorization wire timing graph.
ieval::TimingWireGraph get_timing_wire_graph(std::string wire_graph_path);


+ 2
- 0
src/interface/tcl/tcl_vec/tcl_register_vec.h

@@ -36,6 +36,8 @@ int registerCmdVectorization()
registerTclCmd(CmdVecLayoutPatchs, "layout_patchs");
registerTclCmd(CmdVecLayoutGraph, "layout_graph");
registerTclCmd(CmdVecFeature, "generate_vectors");
registerTclCmd(CmdReadVecNets, "read_vectors_nets");
registerTclCmd(CmdReadVecNetsPattern, "read_vectors_nets_patterns");

return EXIT_SUCCESS;
}


+ 73
- 5
src/interface/tcl/tcl_vec/tcl_vec.cpp

@@ -107,17 +107,17 @@ CmdVecFeature::CmdVecFeature(const char* cmd_name) : TclCmd(cmd_name)
auto* dir_option = new TclStringOption(TCL_DIRECTORY, 1, nullptr);
addOption(dir_option);

auto* row_step_option = new TclIntOption(TCL_PATCH_ROW_STEP, 0, 9);
auto* row_step_option = new TclIntOption(TCL_PATCH_ROW_STEP, 0, 9);
addOption(row_step_option);

auto* col_step_option = new TclIntOption(TCL_PATCH_COL_STEP, 0, 9);
auto* col_step_option = new TclIntOption(TCL_PATCH_COL_STEP, 0, 9);
addOption(col_step_option);
}

unsigned CmdVecFeature::check()
{
TclOption* dir_option = getOptionOrArg(TCL_DIRECTORY);
LOG_FATAL_IF(!dir_option);
LOG_FATAL_IF(!dir_option);

TclOption* row_step_option = getOptionOrArg(TCL_PATCH_ROW_STEP);
if (row_step_option && row_step_option->getIntVal() <= 0) {
@@ -145,13 +145,13 @@ unsigned CmdVecFeature::exec()
auto path_option = dir_option->getStringVal();
std::string path = path_option == nullptr ? "./vectors" : path_option;

int patch_row_step = 9;
int patch_row_step = 9;
TclOption* row_step_option = getOptionOrArg(TCL_PATCH_ROW_STEP);
if (row_step_option != nullptr) {
patch_row_step = row_step_option->getIntVal();
}

int patch_col_step = 9;
int patch_col_step = 9;
TclOption* col_step_option = getOptionOrArg(TCL_PATCH_COL_STEP);
if (col_step_option != nullptr) {
patch_col_step = col_step_option->getIntVal();
@@ -167,4 +167,72 @@ unsigned CmdVecFeature::exec()
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

CmdReadVecNets::CmdReadVecNets(const char* cmd_name) : TclCmd(cmd_name)
{
auto* dir_option = new TclStringOption(TCL_DIRECTORY, 1, nullptr);
addOption(dir_option);
}

unsigned CmdReadVecNets::check()
{
TclOption* dir_option = getOptionOrArg(TCL_DIRECTORY);
LOG_FATAL_IF(!dir_option);

return 1;
}

unsigned CmdReadVecNets::exec()
{
if (!check()) {
return 0;
}

TclOption* dir_option = getOptionOrArg(TCL_DIRECTORY);
if (dir_option != nullptr) {
auto path_option = dir_option->getStringVal();
std::string path = path_option == nullptr ? "./vectors/nets" : path_option;

ivec::VectorizationApi vec_api;
vec_api.readVectorsNets(path);
}

return 1;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
CmdReadVecNetsPattern::CmdReadVecNetsPattern(const char* cmd_name) : TclCmd(cmd_name)
{
auto* path_option = new TclStringOption(TCL_PATH, 1, nullptr);
addOption(path_option);
}

unsigned CmdReadVecNetsPattern::check()
{
TclOption* path_option = getOptionOrArg(TCL_PATH);
LOG_FATAL_IF(!path_option);

return 1;
}

unsigned CmdReadVecNetsPattern::exec()
{
if (!check()) {
return 0;
}

TclOption* path_option = getOptionOrArg(TCL_PATH);
if (path_option != nullptr) {
auto path = path_option->getStringVal();

ivec::VectorizationApi vec_api;
vec_api.readVectorsNetsPatterns(path);
}

return 1;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

} // namespace tcl

+ 28
- 0
src/interface/tcl/tcl_vec/tcl_vec.h

@@ -78,4 +78,32 @@ class CmdVecFeature : public TclCmd
// private data
};

class CmdReadVecNets : public TclCmd
{
public:
explicit CmdReadVecNets(const char* cmd_name);
~CmdReadVecNets() override = default;

unsigned check() override;
unsigned exec() override;

private:
// private function
// private data
};

class CmdReadVecNetsPattern : public TclCmd
{
public:
explicit CmdReadVecNetsPattern(const char* cmd_name);
~CmdReadVecNetsPattern() override = default;

unsigned check() override;
unsigned exec() override;

private:
// private function
// private data
};

} // namespace tcl

+ 2
- 0
src/operation/iPA/api/Power.cc

@@ -1222,6 +1222,8 @@ unsigned Power::reportIRDropTable(const char* rpt_file_name) {
std::fprintf(f.get(), "Report : Net %s IR Drop Report, Unit V\n",
net_name.c_str());
std::fprintf(f.get(), "%s\n", report_tbl->c_str());
LOG_INFO << "Instance IR Drop Report for net " << net_name << " :\n"
<< report_tbl->c_str();
}

return 1;


+ 76
- 0
src/operation/iPL/api/PLAPI.cc

@@ -457,6 +457,52 @@ void PLAPI::runFlow()
writeBackSourceDataBase();
}

void PLAPI::runAiFlow(const std::string& onnx_path, const std::string& normalization_path)
{
runGP();
notifyPLWLInfo(0);

if (PlacerDBInst.get_placer_config()->get_buffer_config().isMaxLengthOpt()) {
std::cout << std::endl;
runBufferInsertion();
printHPWLInfo();
}

if (PlacerDBInst.get_placer_config()->get_dp_config().isEnableNetworkflow()) {
std::cout << std::endl;
runNetworkFlowSpread();
}

std::cout << std::endl;
runLG();
notifyPLWLInfo(1);

std::cout << std::endl;
if (isSTAStarted()) {
runPostGP();
} else {
#ifdef BUILD_AI_PREDICTOR
runAIDP(onnx_path, normalization_path);
#else
runDP();
#endif
}
notifyPLWLInfo(2);

std::cout << std::endl;

reportPLInfo();
std::cout << std::endl;
LOG_INFO << "Log has been writed to dir: ./result/pl/log/";


if (isSTAStarted()) {
_external_api->destroyTimingEval();
}

writeBackSourceDataBase();
}

void PLAPI::insertLayoutFiller()
{
notifyPLOriginInfo();
@@ -538,6 +584,36 @@ void PLAPI::runDP()
}
}

#ifdef BUILD_AI_PREDICTOR
void PLAPI::runAIDP(const std::string& onnx_path, const std::string& normalization_path)
{
bool legal_flag = checkLegality();
if (!legal_flag) {
LOG_WARNING << "Design Instances before detail placement are not legal";
return;
}

DetailPlacer detail_place(PlacerDBInst.get_placer_config(), &PlacerDBInst);

if (!detail_place.loadAIWirelengthModel(onnx_path)) {
LOG_ERROR << "Failed to load AI wirelength model: " << onnx_path;
LOG_INFO << "Falling back to traditional HPWL";
} else {
detail_place.setUseAIWirelength(true);
}

if(!detail_place.loadAIWirelengthNormalizationParams(normalization_path)){
LOG_ERROR << "Failed to load AI wirelength normalization parameters: " << normalization_path;
}

detail_place.runDetailPlace();

if (!checkLegality()) {
LOG_WARNING << "DP result is not legal";
}
}
#endif

// run networkflow to spread cell
// Input: after global placement. Output: low density distribution result with overlap.
// Legalization is further needed.


+ 4
- 0
src/operation/iPL/api/PLAPI.hh

@@ -45,6 +45,7 @@ class PLAPI

void initAPI(std::string pl_json_path, idb::IdbBuilder* idb_builder);
void runFlow();
void runAiFlow(const std::string& onnx_path, const std::string& normalization_path);
void runIncrementalFlow();
void insertLayoutFiller();

@@ -57,6 +58,9 @@ class PLAPI
bool runIncrLG(std::vector<std::string> inst_name_list);
void runPostGP();
void runDP();
#ifdef BUILD_AI_PREDICTOR
void runAIDP(const std::string& onnx_path, const std::string& normalization_path);
#endif
void runBufferInsertion();
void writeBackSourceDataBase();



+ 14
- 3
src/operation/iPL/source/module/CMakeLists.txt

@@ -12,6 +12,7 @@ set(iPL_GRID_MANAGER ${iPL_MODULE}/grid_manager)
set(iPL_LOGGER ${iPL_MODULE}/logger)
set(iPL_TOPOLOGY_MANAGER ${iPL_MODULE}/topology_manager)
set(iPL_WRAPPER ${iPL_MODULE}/wrapper)
set(iPL_AI_PREDICTOR ${iPL_MODULE}/ai_predictor)

# add_subdirectory(${iPL_MP})
add_subdirectory(${iPL_IP})
@@ -27,14 +28,15 @@ add_subdirectory(${iPL_GRID_MANAGER})
add_subdirectory(${iPL_LOGGER})
add_subdirectory(${iPL_TOPOLOGY_MANAGER})
add_subdirectory(${iPL_WRAPPER})
if(BUILD_AI_PREDICTOR)
add_subdirectory(${iPL_AI_PREDICTOR})
endif()

add_library(ipl-module INTERFACE)
target_link_libraries(ipl-module
INTERFACE
set(IPL_MODULE_LIBS
ipl-module-checker
ipl-module-detail_placer
ipl-module-evaluator

ipl-module-buffer
ipl-module-filler
ipl-module-global_placer
@@ -48,4 +50,13 @@ target_link_libraries(ipl-module
ipl-module-wrapper
)

if(BUILD_AI_PREDICTOR)
list(APPEND IPL_MODULE_LIBS ipl-module-ai_predictor)
endif()

target_link_libraries(ipl-module
INTERFACE
${IPL_MODULE_LIBS}
)

target_include_directories(ipl-module INTERFACE ${iPL_MODULE})

+ 13
- 0
src/operation/iPL/source/module/ai_predictor/CMakeLists.txt

@@ -0,0 +1,13 @@
#set
set(iPL_WIRELENGTH_PREDICT ${iPL_AI_PREDICTOR}/wirelength)

add_subdirectory(${iPL_WIRELENGTH_PREDICT})

add_library(ipl-module-ai_predictor INTERFACE)

target_link_libraries(ipl-module-ai_predictor
INTERFACE
ipl_module_ai_wirelength
)

target_include_directories(ipl-module-ai_predictor INTERFACE ${iPL_AI_PREDICTOR})

+ 17
- 0
src/operation/iPL/source/module/ai_predictor/wirelength/CMakeLists.txt

@@ -0,0 +1,17 @@
add_library(ipl_module_ai_wirelength
wirelength_predictor.cc
onnx_model_handler.cc
normalization_handler.cc
)

target_link_libraries(ipl_module_ai_wirelength
PUBLIC
${HOME_THIRDPARTY}/onnxruntime/libonnxruntime.so
)

target_include_directories(ipl_module_ai_wirelength
PUBLIC
${CMAKE_CURRENT_SOURCE_DIR}
${HOME_THIRDPARTY}/onnxruntime/include
${HOME_THIRDPARTY}/json
)

+ 143
- 0
src/operation/iPL/source/module/ai_predictor/wirelength/normalization_handler.cc

@@ -0,0 +1,143 @@
// ***************************************************************************************
// Copyright (c) 2023-2025 Peng Cheng Laboratory
// Copyright (c) 2023-2025 Institute of Computing Technology, Chinese Academy of Sciences
// Copyright (c) 2023-2025 Beijing Institute of Open Source Chip
//
// iEDA is licensed under Mulan PSL v2.
// You can use this software according to the terms and conditions of the Mulan PSL v2.
// You may obtain a copy of Mulan PSL v2 at:
// http://license.coscl.org.cn/MulanPSL2
//
// THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
// EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
// MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
//
// See the Mulan PSL v2 for more details.
// ***************************************************************************************

#include "normalization_handler.hh"

#include <fstream>
#include <iostream>
#include <sstream>
#include <algorithm>
#include <cmath>

#include "json.hpp"

namespace ipl {

NormalizationHandler::NormalizationHandler() : _is_loaded(false) {}

NormalizationHandler::~NormalizationHandler() {}

bool NormalizationHandler::loadMinMaxParams(const std::string& params_path) {
return _parseJsonParams(params_path);
}

void NormalizationHandler::setMinMaxParams(const std::vector<float>& data_min,
const std::vector<float>& data_max,
const std::vector<std::string>& feature_names) {
if (data_min.size() != data_max.size()) {
std::cerr << "Error: data_min and data_max must have same size" << std::endl;
return;
}

_data_min = data_min;
_data_max = data_max;
_feature_names = feature_names;
_is_loaded = true;
}

std::vector<float> NormalizationHandler::normalize(const std::vector<float>& features) const {
if (!_is_loaded) {
std::cerr << "Error: Normalization parameters not loaded" << std::endl;
return features;
}

if (features.size() != _data_min.size()) {
std::cerr << "Error: Feature size mismatch. Expected " << _data_min.size()
<< ", got " << features.size() << std::endl;
return features;
}

std::vector<float> normalized_features;
normalized_features.reserve(features.size());

for (size_t i = 0; i < features.size(); ++i) {
float range = _data_max[i] - _data_min[i];
if (range == 0.0f) {
// if max == min, normailzed = 0
normalized_features.push_back(0.0f);
} else {
// MinMax normalization: (x - min) / (max - min)
float normalized = (features[i] - _data_min[i]) / range;

normalized = std::max(0.0f, std::min(1.0f, normalized));
normalized_features.push_back(normalized);
}
}

return normalized_features;
}

bool NormalizationHandler::isReady() const {
return _is_loaded;
}

std::vector<std::string> NormalizationHandler::getFeatureNames() const {
return _feature_names;
}

size_t NormalizationHandler::getFeatureCount() const {
return _data_min.size();
}

bool NormalizationHandler::_parseJsonParams(const std::string& params_path) {
std::ifstream file(params_path);
if (!file.is_open()) {
std::cerr << "Error: Cannot open normalization parameters file: " << params_path << std::endl;
return false;
}

try {
nlohmann::json j;
file >> j;

// parse data
if (j.contains("data_min") && j.contains("data_max")) {
_data_min = j["data_min"].get<std::vector<float>>();
_data_max = j["data_max"].get<std::vector<float>>();
if (j.contains("feature_names")) {
_feature_names = j["feature_names"].get<std::vector<std::string>>();
}

if (_data_min.size() != _data_max.size() || _data_min.empty()) {
std::cerr << "Error: Invalid normalization parameters - size mismatch" << std::endl;
return false;
}

_is_loaded = true;

std::cout << "Successfully loaded normalization parameters:" << std::endl;
std::cout << " Features: " << _data_min.size() << std::endl;
std::cout << " Feature names: ";
for (const auto& name : _feature_names) {
std::cout << name << " ";
}
std::cout << std::endl;

return true;
} else {
std::cerr << "Error: Missing required fields in JSON" << std::endl;
return false;
}

} catch (const std::exception& e) {
std::cerr << "Error parsing JSON: " << e.what() << std::endl;
return false;
}
}

} // namespace ipl

+ 63
- 0
src/operation/iPL/source/module/ai_predictor/wirelength/normalization_handler.hh

@@ -0,0 +1,63 @@
// ***************************************************************************************
// Copyright (c) 2023-2025 Peng Cheng Laboratory
// Copyright (c) 2023-2025 Institute of Computing Technology, Chinese Academy of Sciences
// Copyright (c) 2023-2025 Beijing Institute of Open Source Chip
//
// iEDA is licensed under Mulan PSL v2.
// You can use this software according to the terms and conditions of the Mulan PSL v2.
// You may obtain a copy of Mulan PSL v2 at:
// http://license.coscl.org.cn/MulanPSL2
//
// THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
// EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
// MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
//
// See the Mulan PSL v2 for more details.
// ***************************************************************************************
#ifndef NORMALIZATION_HANDLER_HH
#define NORMALIZATION_HANDLER_HH

#include <vector>
#include <string>
#include <memory>

namespace ipl {

class NormalizationHandler {
public:
NormalizationHandler();
~NormalizationHandler();

// Load MinMaxScaler parameters from JSON file
bool loadMinMaxParams(const std::string& params_path);

// Set MinMaxScaler parameters manually
void setMinMaxParams(const std::vector<float>& data_min,
const std::vector<float>& data_max,
const std::vector<std::string>& feature_names = {});

// Normalize input features using MinMax scaling
std::vector<float> normalize(const std::vector<float>& features) const;

// Check if normalization parameters are loaded
bool isReady() const;

// Get feature names
std::vector<std::string> getFeatureNames() const;

// Get number of features
size_t getFeatureCount() const;

private:
std::vector<float> _data_min;
std::vector<float> _data_max;
std::vector<std::string> _feature_names;
bool _is_loaded = false;

// Parse JSON file to extract normalization parameters
bool _parseJsonParams(const std::string& params_path);
};

} // namespace ipl

#endif // NORMALIZATION_HANDLER_HH
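
A minimal sketch exercising NormalizationHandler with manually supplied MinMax parameters; the two feature names and all values are made up. As in normalize() above, each feature is scaled as (x - min) / (max - min) and clamped to [0, 1].

#include <iostream>
#include <vector>

#include "normalization_handler.hh"

int main()
{
  ipl::NormalizationHandler scaler;
  // Hypothetical two-feature setup: per-feature min, per-feature max, optional names.
  scaler.setMinMaxParams({0.0f, 10.0f}, {1.0f, 50.0f}, {"pin_count", "bbox_width"});

  // (0.5 - 0) / (1 - 0) = 0.5 and (30 - 10) / (50 - 10) = 0.5
  std::vector<float> normalized = scaler.normalize({0.5f, 30.0f});
  for (float value : normalized) {
    std::cout << value << " ";
  }
  std::cout << std::endl;
  return 0;
}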

+ 210
- 0
src/operation/iPL/source/module/ai_predictor/wirelength/onnx_model_handler.cc

@@ -0,0 +1,210 @@
// ***************************************************************************************
// Copyright (c) 2023-2025 Peng Cheng Laboratory
// Copyright (c) 2023-2025 Institute of Computing Technology, Chinese Academy of Sciences
// Copyright (c) 2023-2025 Beijing Institute of Open Source Chip
//
// iEDA is licensed under Mulan PSL v2.
// You can use this software according to the terms and conditions of the Mulan PSL v2.
// You may obtain a copy of Mulan PSL v2 at:
// http://license.coscl.org.cn/MulanPSL2
//
// THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
// EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
// MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
//
// See the Mulan PSL v2 for more details.
// ***************************************************************************************
#include "onnx_model_handler.hh"

#include <iostream>

namespace ipl {

ONNXModelHandler::ONNXModelHandler() : _env(ORT_LOGGING_LEVEL_WARNING, "ONNXModelHandler") {
// Initialize ONNX Runtime environment
_session_options.SetIntraOpNumThreads(1);
_session_options.SetInterOpNumThreads(1);
_session_options.SetGraphOptimizationLevel(GraphOptimizationLevel::ORT_ENABLE_BASIC);
}

ONNXModelHandler::~ONNXModelHandler() {
// Clean up - smart pointers handle this automatically
}

bool ONNXModelHandler::loadModel(const std::string& model_path) {
try {
// Create session from model file
_session = std::make_unique<Ort::Session>(_env, model_path.c_str(), _session_options);

// Get allocator
Ort::AllocatorWithDefaultOptions allocator;

// Get input information
size_t num_input_nodes = _session->GetInputCount();
if (num_input_nodes == 0) {
std::cerr << "Model has no input nodes" << std::endl;
return false;
}

// Get input names and shapes
_input_names.clear();
_input_shapes.clear();
for (size_t i = 0; i < num_input_nodes; i++) {
// Get input name using the correct API
Ort::AllocatedStringPtr input_name_ptr = _session->GetInputNameAllocated(i, allocator);
_input_names.push_back(std::string(input_name_ptr.get()));

// Get input type info
Ort::TypeInfo input_type_info = _session->GetInputTypeInfo(i);
auto input_tensor_info = input_type_info.GetTensorTypeAndShapeInfo();
// Get input shape
std::vector<int64_t> input_shape = input_tensor_info.GetShape();
_input_shapes.push_back(input_shape);
}

// Get output information
size_t num_output_nodes = _session->GetOutputCount();
if (num_output_nodes == 0) {
std::cerr << "Model has no output nodes" << std::endl;
return false;
}

// Get output names and shapes
_output_names.clear();
_output_shapes.clear();
for (size_t i = 0; i < num_output_nodes; i++) {
// Get output name
Ort::AllocatedStringPtr output_name_ptr = _session->GetOutputNameAllocated(i, allocator);
_output_names.push_back(std::string(output_name_ptr.get()));

// Get output type info
Ort::TypeInfo output_type_info = _session->GetOutputTypeInfo(i);
auto output_tensor_info = output_type_info.GetTensorTypeAndShapeInfo();
// Get output shape
std::vector<int64_t> output_shape = output_tensor_info.GetShape();
_output_shapes.push_back(output_shape);
}

// Validate shapes for our use case
if (_input_shapes[0].size() != 2) {
std::cerr << "Unexpected input shape dimension: " << _input_shapes[0].size() << std::endl;
return false;
}

if (_output_shapes[0].size() != 2) {
std::cerr << "Unexpected output shape dimension: " << _output_shapes[0].size() << std::endl;
return false;
}

// Set feature counts (assuming batch dimension is dynamic or 1)
_input_feature_count = static_cast<int>(_input_shapes[0][1]);
_output_feature_count = static_cast<int>(_output_shapes[0][1]);

std::cout << "Successfully loaded ONNX model from " << model_path << std::endl;
std::cout << "Input name: " << _input_names[0] << std::endl;
std::cout << "Output name: " << _output_names[0] << std::endl;
std::cout << "Input feature count: " << _input_feature_count << std::endl;
std::cout << "Output feature count: " << _output_feature_count << std::endl;

return true;
} catch (const Ort::Exception& e) {
std::cerr << "ONNX exception: " << e.what() << std::endl;
return false;
} catch (const std::exception& e) {
std::cerr << "Exception: " << e.what() << std::endl;
return false;
}
}

std::vector<float> ONNXModelHandler::predict(const std::vector<float>& input) {
if (!_session) {
std::cerr << "Model not loaded" << std::endl;
return {};
}

if (input.size() != static_cast<size_t>(_input_feature_count)) {
std::cerr << "Input feature count mismatch: expected " << _input_feature_count
<< ", got " << input.size() << std::endl;
return {};
}

try {
// Create input tensor
const std::vector<int64_t> input_shape = {1, _input_feature_count}; // Batch size 1
Ort::MemoryInfo memory_info = Ort::MemoryInfo::CreateCpu(
OrtAllocatorType::OrtArenaAllocator, OrtMemType::OrtMemTypeDefault);

Ort::Value input_tensor = Ort::Value::CreateTensor<float>(
memory_info, const_cast<float*>(input.data()), input.size(),
input_shape.data(), input_shape.size());

if (!input_tensor.IsTensor()) {
std::cerr << "Failed to create input tensor" << std::endl;
return {};
}

// Prepare input and output names
std::vector<const char*> input_names_cstr;
std::vector<const char*> output_names_cstr;
for (const auto& name : _input_names) {
input_names_cstr.push_back(name.c_str());
}
for (const auto& name : _output_names) {
output_names_cstr.push_back(name.c_str());
}

// Run inference
std::vector<Ort::Value> output_tensors = _session->Run(
Ort::RunOptions{nullptr},
input_names_cstr.data(), &input_tensor, 1,
output_names_cstr.data(), output_names_cstr.size());

if (output_tensors.empty()) {
std::cerr << "Failed to get output tensors" << std::endl;
return {};
}

// Get output data
float* output_data = output_tensors[0].GetTensorMutableData<float>();
if (!output_data) {
std::cerr << "Failed to get output data" << std::endl;
return {};
}

// Get the actual output size
auto output_tensor_info = output_tensors[0].GetTensorTypeAndShapeInfo();
std::vector<int64_t> output_shape = output_tensor_info.GetShape();
size_t output_size = 1;
for (int64_t dim : output_shape) {
output_size *= static_cast<size_t>(dim);
}

// Copy output data to vector
std::vector<float> output(output_data, output_data + output_size);
return output;
} catch (const Ort::Exception& e) {
std::cerr << "ONNX exception during inference: " << e.what() << std::endl;
return {};
} catch (const std::exception& e) {
std::cerr << "Exception during inference: " << e.what() << std::endl;
return {};
}
}

int ONNXModelHandler::getInputFeatureCount() const {
return _input_feature_count;
}

int ONNXModelHandler::getOutputFeatureCount() const {
return _output_feature_count;
}

} // namespace ipl

+ 60
- 0
src/operation/iPL/source/module/ai_predictor/wirelength/onnx_model_handler.hh

@@ -0,0 +1,60 @@
// ***************************************************************************************
// Copyright (c) 2023-2025 Peng Cheng Laboratory
// Copyright (c) 2023-2025 Institute of Computing Technology, Chinese Academy of Sciences
// Copyright (c) 2023-2025 Beijing Institute of Open Source Chip
//
// iEDA is licensed under Mulan PSL v2.
// You can use this software according to the terms and conditions of the Mulan PSL v2.
// You may obtain a copy of Mulan PSL v2 at:
// http://license.coscl.org.cn/MulanPSL2
//
// THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
// EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
// MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
//
// See the Mulan PSL v2 for more details.
// ***************************************************************************************
#ifndef ONNX_MODEL_HANDLER_HH
#define ONNX_MODEL_HANDLER_HH

#include <string>
#include <vector>
#include <memory>
#include <iostream>

#include "onnxruntime_cxx_api.h"

namespace ipl {

class ONNXModelHandler {
public:
ONNXModelHandler();
~ONNXModelHandler();

// Load ONNX model from file
bool loadModel(const std::string& model_path);

// Predict using the loaded ONNX model
std::vector<float> predict(const std::vector<float>& input);

// Get the number of input features expected by the model
int getInputFeatureCount() const;

// Get the number of output features produced by the model
int getOutputFeatureCount() const;

private:
Ort::Env _env;
Ort::SessionOptions _session_options;
std::unique_ptr<Ort::Session> _session;
int _input_feature_count = 0;
int _output_feature_count = 0;
std::vector<std::string> _input_names;
std::vector<std::string> _output_names;
std::vector<std::vector<int64_t>> _input_shapes;
std::vector<std::vector<int64_t>> _output_shapes;
};

} // namespace ipl

#endif // ONNX_MODEL_HANDLER_HH
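
A sketch of the intended call sequence for ONNXModelHandler, based only on the interface above; the model path is a placeholder and the zero-filled feature vector is there just to show the shapes involved.

#include <iostream>
#include <vector>

#include "onnx_model_handler.hh"

int main()
{
  ipl::ONNXModelHandler handler;
  if (!handler.loadModel("/path/to/wirelength_model.onnx")) {  // placeholder path
    return 1;
  }

  // Dummy feature vector sized to the model's expected input width.
  std::vector<float> features(handler.getInputFeatureCount(), 0.0f);
  std::vector<float> output = handler.predict(features);
  if (!output.empty()) {
    std::cout << "prediction: " << output[0] << std::endl;
  }
  return 0;
}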

+ 146
- 0
src/operation/iPL/source/module/ai_predictor/wirelength/wirelength_predictor.cc

@@ -0,0 +1,146 @@
// ***************************************************************************************
// Copyright (c) 2023-2025 Peng Cheng Laboratory
// Copyright (c) 2023-2025 Institute of Computing Technology, Chinese Academy of Sciences
// Copyright (c) 2023-2025 Beijing Institute of Open Source Chip
//
// iEDA is licensed under Mulan PSL v2.
// You can use this software according to the terms and conditions of the Mulan PSL v2.
// You may obtain a copy of Mulan PSL v2 at:
// http://license.coscl.org.cn/MulanPSL2
//
// THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
// EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
// MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
//
// See the Mulan PSL v2 for more details.
// ***************************************************************************************

#include "wirelength_predictor.hh"

#include <iostream>

namespace ipl {

WirelengthPredictor::WirelengthPredictor() :
_model_handler(std::make_unique<ONNXModelHandler>()),
_via_normalizer(std::make_unique<NormalizationHandler>()),
_wirelength_normalizer(std::make_unique<NormalizationHandler>()),
_is_wirelength_model(false)
{
std::cout << "Wirelength predictor initialized" << std::endl;
}

bool WirelengthPredictor::loadModel(const std::string& model_path)
{
if (!_model_handler->loadModel(model_path)) {
return false;
}

// Assume all models are wirelength models by default
_is_wirelength_model = true;

std::cout << "Successfully loaded wirelength prediction model from " << model_path << std::endl;
return true;
}

float WirelengthPredictor::predictWirelength(const std::vector<float>& features)
{
if (!isModelLoaded()) {
std::cerr << "Model not loaded" << std::endl;
return -1.0f;
}

if (!_is_wirelength_model) {
std::cerr << "Loaded model is not a wirelength prediction model" << std::endl;
return -1.0f;
}

std::vector<float> normalized_features = normalizeFeatures(features, true);

std::vector<float> output = _model_handler->predict(normalized_features);
if (output.empty()) {
std::cerr << "Prediction failed" << std::endl;
return -1.0f;
}

float prediction = output[0];
std::cout << "Net wirelength prediction: " << prediction << std::endl;
return prediction;
}

float WirelengthPredictor::predictViaCount(int net_id, const std::vector<float>& features)
{
if (!isModelLoaded()) {
std::cerr << "Model not loaded" << std::endl;
return -1.0f;
}

if (_is_wirelength_model) {
std::cerr << "Loaded model is not a via count prediction model" << std::endl;
return -1.0f;
}

std::vector<float> normalized_features = normalizeFeatures(features, false);

std::vector<float> output = _model_handler->predict(normalized_features);
if (output.empty()) {
std::cerr << "Prediction failed" << std::endl;
return -1.0f;
}

float prediction = output[0];
std::cout << "Net " << net_id << " via count prediction: " << prediction << std::endl;
return prediction;
}

bool WirelengthPredictor::loadViaNormalizationParams(const std::string& params_path)
{
if (!_via_normalizer->loadMinMaxParams(params_path)) {
std::cerr << "Failed to load via normalization parameters: " << params_path << std::endl;
return false;
}
std::cout << "Successfully loaded via normalization parameters: " << params_path << std::endl;
return true;
}

bool WirelengthPredictor::loadWirelengthNormalizationParams(const std::string& params_path)
{
if (!_wirelength_normalizer->loadMinMaxParams(params_path)) {
std::cerr << "Failed to load wirelength normalization parameters: " << params_path << std::endl;
return false;
}
std::cout << "Successfully loaded wirelength normalization parameters: " << params_path << std::endl;
return true;
}

int WirelengthPredictor::getRequiredFeatureCount() const
{
if (!isModelLoaded()) {
std::cerr << "Model not loaded" << std::endl;
return 0;
}

return _model_handler->getInputFeatureCount();
}

bool WirelengthPredictor::isModelLoaded() const
{
// Check if model handler has loaded a model
return _model_handler->getInputFeatureCount() > 0;
}

std::vector<float> WirelengthPredictor::normalizeFeatures(const std::vector<float>& features, bool is_wirelength)
{
if (is_wirelength && _wirelength_normalizer && _wirelength_normalizer->isReady()) {
std::vector<float> normalized = _wirelength_normalizer->normalize(features);
return normalized;
} else if (!is_wirelength && _via_normalizer && _via_normalizer->isReady()) {
std::vector<float> normalized = _via_normalizer->normalize(features);
return normalized;
} else {
std::cerr << "Warning: normalization parameters not loaded, using raw features" << std::endl;
return features;
}
}

} // namespace ipl

+ 64
- 0
src/operation/iPL/source/module/ai_predictor/wirelength/wirelength_predictor.hh View File

@@ -0,0 +1,64 @@
// ***************************************************************************************
// Copyright (c) 2023-2025 Peng Cheng Laboratory
// Copyright (c) 2023-2025 Institute of Computing Technology, Chinese Academy of Sciences
// Copyright (c) 2023-2025 Beijing Institute of Open Source Chip
//
// iEDA is licensed under Mulan PSL v2.
// You can use this software according to the terms and conditions of the Mulan PSL v2.
// You may obtain a copy of Mulan PSL v2 at:
// http://license.coscl.org.cn/MulanPSL2
//
// THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
// EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
// MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
//
// See the Mulan PSL v2 for more details.
// ***************************************************************************************
#pragma once
#include <string>
#include <memory>
#include <vector>

#include "onnx_model_handler.hh"
#include "normalization_handler.hh"

namespace ipl {

class WirelengthPredictor
{
public:
WirelengthPredictor();
~WirelengthPredictor() {}
// Load wirelength prediction model
bool loadModel(const std::string& model_path);

// Predict wirelength for a net based on its features
float predictWirelength(const std::vector<float>& features);

// Predict via count for a net based on its features
float predictViaCount(int net_id, const std::vector<float>& features);

// Load normalization parameters for via prediction
bool loadViaNormalizationParams(const std::string& params_path);

// Load normalization parameters for wirelength prediction
bool loadWirelengthNormalizationParams(const std::string& params_path);

// Get required feature count for the model
int getRequiredFeatureCount() const;

// Check if model is loaded
bool isModelLoaded() const;

private:
std::unique_ptr<ONNXModelHandler> _model_handler;
std::unique_ptr<NormalizationHandler> _via_normalizer;
std::unique_ptr<NormalizationHandler> _wirelength_normalizer;
bool _is_wirelength_model;

// Normalize features based on prediction type
std::vector<float> normalizeFeatures(const std::vector<float>& features, bool is_wirelength);
};

} // namespace ipl
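
For orientation, here is a minimal usage sketch of the WirelengthPredictor API declared above. It is a sketch only, assuming an iEDA build with the ONNX predictor enabled; the model and parameter paths are placeholders and the feature values are purely illustrative.

// Sketch only: exercises the public API of ipl::WirelengthPredictor; paths are hypothetical.
#include <iostream>
#include <vector>

#include "wirelength_predictor.hh"

int main()
{
  ipl::WirelengthPredictor predictor;

  // Load the ONNX wirelength model and its min-max normalization parameters.
  if (!predictor.loadModel("wirelength_model.onnx")
      || !predictor.loadWirelengthNormalizationParams("wirelength_minmax.json")) {
    return 1;
  }

  // Feature order follows AIWirelength::extractNetFeatures below:
  // width, height, pin_num, aspect_ratio, l_ness, rsmt, area, route_ratio_x, route_ratio_y.
  std::vector<float> features = {12.0f, 8.0f, 4.0f, 1.5f, 0.5f, 20.0f, 96.0f, 0.125f, 0.083f};
  if (static_cast<int>(features.size()) != predictor.getRequiredFeatureCount()) {
    std::cerr << "feature count mismatch with the loaded model" << std::endl;
    return 1;
  }

  // Returns -1.0f on failure; AIWirelength interprets the output as a ratio of the RSMT estimate.
  float predicted = predictor.predictWirelength(features);
  std::cout << "predicted wirelength value: " << predicted << std::endl;
  return 0;
}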

+ 1
- 0
src/operation/iPL/source/module/detail_placer/CMakeLists.txt View File

@@ -34,6 +34,7 @@ target_link_libraries(ipl-module-detail_placer
ipl-configurator
ipl-module-grid_manager
ipl-module-topology_manager
ipl_module_evaluator_wirelength
)

target_include_directories(ipl-module-detail_placer PUBLIC ${iPL_DP})

+ 70
- 2
src/operation/iPL/source/module/detail_placer/DetailPlacer.cc View File

@@ -18,6 +18,9 @@

#include "module/evaluator/density/Density.hh"
#include "module/evaluator/wirelength/HPWirelength.hh"
#ifdef BUILD_AI_PREDICTOR
#include "module/evaluator/wirelength/AIWirelength.hh"
#endif
#include "operation/BinOpt.hh"
#include "operation/InstanceSwap.hh"
#include "operation/LocalReorder.hh"
@@ -36,6 +39,12 @@ DetailPlacer::DetailPlacer(Config* pl_config, PlacerDB* placer_db)

initDPDatabase(placer_db);
_operator.initDPOperator(&_database, &_config);

#ifdef BUILD_AI_PREDICTOR
// Initialize AI wirelength evaluator
_ai_wirelength_evaluator = std::make_unique<AIWirelength>(_operator.get_topo_manager());
_use_ai_wirelength = false;
#endif
}

DetailPlacer::~DetailPlacer()
@@ -593,9 +602,68 @@ void DetailPlacer::notifyPLPlaceDensity()

int64_t DetailPlacer::calTotalHPWL()
{
HPWirelength hpwl_eval(_operator.get_topo_manager());
return hpwl_eval.obtainTotalWirelength() + _database._outside_wl;
#ifdef BUILD_AI_PREDICTOR
if (_use_ai_wirelength && _ai_wirelength_evaluator && _ai_wirelength_evaluator->isModelLoaded()) {
LOG_INFO << "Calculate Total Wirelength using AI model.";
return calTotalAIWirelength() + _database._outside_wl;
} else {
#endif
HPWirelength hpwl_eval(_operator.get_topo_manager());
return hpwl_eval.obtainTotalWirelength() + _database._outside_wl;
#ifdef BUILD_AI_PREDICTOR
}
#endif
}

#ifdef BUILD_AI_PREDICTOR
bool DetailPlacer::loadAIWirelengthModel(const std::string& model_path)
{
if (_ai_wirelength_evaluator) {
bool success = _ai_wirelength_evaluator->loadModel(model_path);
if (success) {
LOG_INFO << "Successfully loaded AI wirelength model: " << model_path;
} else {
LOG_ERROR << "Failed to load AI wirelength model: " << model_path;
}
return success;
}
return false;
}

bool DetailPlacer::loadAIWirelengthNormalizationParams(const std::string& params_path)
{
if (_ai_wirelength_evaluator) {
bool success = _ai_wirelength_evaluator->loadNormalizationParams(params_path);
if (success) {
LOG_INFO << "Successfully loaded AI wirelength normalization parameters: " << params_path;
} else {
LOG_ERROR << "Failed to load AI wirelength normalization parameters: " << params_path;
}
return success;
}
return false;
}

void DetailPlacer::setUseAIWirelength(bool use_ai)
{
_use_ai_wirelength = use_ai;
if (_use_ai_wirelength) {
if (!_ai_wirelength_evaluator || !_ai_wirelength_evaluator->isModelLoaded()) {
LOG_WARNING << "AI wirelength model not loaded, falling back to HPWL";
_use_ai_wirelength = false;
}
}
LOG_INFO << "AI wirelength prediction " << (_use_ai_wirelength ? "enabled" : "disabled");
}

int64_t DetailPlacer::calTotalAIWirelength()
{
if (_ai_wirelength_evaluator && _ai_wirelength_evaluator->isModelLoaded()) {
return _ai_wirelength_evaluator->obtainTotalWirelength();
}
return 0;
}
#endif

float DetailPlacer::calPeakBinDensity()
{


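Taken together, the new hooks are meant to be driven in this order: load the model, load the normalization parameters, then flip the switch (which falls back to HPWL when no model is loaded). A hedged sketch, assuming BUILD_AI_PREDICTOR is defined and a DetailPlacer instance already exists; the file paths are placeholders.

// Sketch only: `detail_placer` is an already-constructed ipl::DetailPlacer.
#ifdef BUILD_AI_PREDICTOR
void enableAiWirelength(ipl::DetailPlacer& detail_placer)
{
  // Load the ONNX model and its normalization parameters first.
  detail_placer.loadAIWirelengthModel("wirelength_model.onnx");
  detail_placer.loadAIWirelengthNormalizationParams("wirelength_minmax.json");

  // setUseAIWirelength() keeps HPWL and logs a warning if the model did not load.
  detail_placer.setUseAIWirelength(true);

  // With the switch on, total wirelength queries route through the AI evaluator.
  int64_t ai_total_wl = detail_placer.calTotalAIWirelength();
  (void) ai_total_wl;
}
#endif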
+ 15
- 0
src/operation/iPL/source/module/detail_placer/DetailPlacer.hh View File

@@ -32,6 +32,9 @@
#include "PlacerDB.hh"
#include "TopologyManager.hh"
#include "database/DPDatabase.hh"
#ifdef BUILD_AI_PREDICTOR
#include "AIWirelength.hh"
#endif

namespace ipl {

@@ -54,10 +57,22 @@ class DetailPlacer

void runDetailPlaceNFS();

#ifdef BUILD_AI_PREDICTOR
// AI wirelength prediction methods
bool loadAIWirelengthModel(const std::string& model_path);
bool loadAIWirelengthNormalizationParams(const std::string& params_path);
void setUseAIWirelength(bool use_ai);
int64_t calTotalAIWirelength();
#endif

private:
DPConfig _config;
DPDatabase _database;
DPOperator _operator;
#ifdef BUILD_AI_PREDICTOR
std::unique_ptr<AIWirelength> _ai_wirelength_evaluator;
bool _use_ai_wirelength = false;
#endif

void initDPConfig(Config* pl_config);
void initDPDatabase(PlacerDB* placer_db);


+ 147
- 0
src/operation/iPL/source/module/evaluator/wirelength/AIWirelength.cc View File

@@ -0,0 +1,147 @@
// ***************************************************************************************
// Copyright (c) 2023-2025 Peng Cheng Laboratory
// Copyright (c) 2023-2025 Institute of Computing Technology, Chinese Academy of Sciences
// Copyright (c) 2023-2025 Beijing Institute of Open Source Chip
//
// iEDA is licensed under Mulan PSL v2.
// You can use this software according to the terms and conditions of the Mulan PSL v2.
// You may obtain a copy of Mulan PSL v2 at:
// http://license.coscl.org.cn/MulanPSL2
//
// THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
// EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
// MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
//
// See the Mulan PSL v2 for more details.
// ***************************************************************************************
/*
* @Description: AI-based wirelength evaluator implementation
 * @FilePath: /iEDA/src/operation/iPL/source/module/evaluator/wirelength/AIWirelength.cc
*/

#include "AIWirelength.hh"
#include "Log.hh"
#include "data/Point.hh"
#include "data/Rectangle.hh"
#include <vector>

namespace ipl {

bool AIWirelength::loadModel(const std::string& model_path)
{
if (_predictor->loadModel(model_path)) {
_is_model_loaded = true;
LOG_INFO << "Successfully loaded AI wirelength prediction model: " << model_path;
return true;
} else {
LOG_ERROR << "Failed to load AI wirelength prediction model: " << model_path;
return false;
}
}

bool AIWirelength::loadNormalizationParams(const std::string& params_path)
{
if (_predictor->loadWirelengthNormalizationParams(params_path)) {
LOG_INFO << "Successfully loaded wirelength normalization parameters: " << params_path;
return true;
} else {
LOG_ERROR << "Failed to load wirelength normalization parameters: " << params_path;
return false;
}
}

int64_t AIWirelength::obtainTotalWirelength()
{
if (!_is_model_loaded) {
LOG_ERROR << "AI wirelength model not loaded";
return 0;
}

int64_t total_wirelength = 0;

for (auto* network : _topology_manager->get_network_list()) {
int64_t net_wirelength = obtainNetWirelength(network->get_network_id());
total_wirelength += net_wirelength;
}

return total_wirelength;
}

int64_t AIWirelength::obtainNetWirelength(int32_t net_id)
{
auto* network = _topology_manager->findNetworkById(net_id);
if (!network) {
LOG_ERROR << "Network with ID " << net_id << " not found";
return 0;
}

// Extract features for this net
std::vector<float> features = extractNetFeatures(net_id);

// Predict the wirelength ratio using the AI model
float predicted_wirelength_ratio = _predictor->predictWirelength(features);

// Scale the RSMT/HPWL estimate (feature index 5) by the predicted ratio and
// truncate to integer wirelength units
int64_t predicted_wirelength = static_cast<int64_t>(features[5] * predicted_wirelength_ratio);

return predicted_wirelength;
}

int64_t AIWirelength::obtainPartOfNetWirelength(int32_t net_id, int32_t sink_pin_id)
{
// For simplicity, we'll just return the full net wirelength
// In a real implementation, you might want to predict partial wirelength
return obtainNetWirelength(net_id);
}

std::vector<float> AIWirelength::extractNetFeatures(int32_t net_id)
{
auto* network = _topology_manager->findNetworkById(net_id);
if (!network) {
LOG_ERROR << "Network with ID " << net_id << " not found";
return {};
}

std::vector<float> features;

// Bounding box dimensions (0:width, 1:height)
Rectangle<int32_t> net_bbox = network->obtainNetWorkShape();
int width = net_bbox.get_width();
int height = net_bbox.get_height();
features.push_back(static_cast<float>(width));
features.push_back(static_cast<float>(height));

// Number of pins (2:pin_num)
int num_pins = network->get_node_list().size();
features.push_back(static_cast<float>(num_pins));

// Aspect ratio (3:aspect_ratio)
float aspect_ratio = (height > 0) ? static_cast<float>(width) / height : 0.0f;
features.push_back(aspect_ratio);

// Lness (4:l_ness)
// For simplicity, we set a constant value; in practice, compute based on pin distribution
float lness = 0.5f;
features.push_back(lness);

// Steiner Tree (5:rsmt)
// For simplicity, we assume HPWL as the Steiner tree length
int64_t rsmt = network->obtainNetWorkShape().get_half_perimeter();
features.push_back(static_cast<float>(rsmt));

// area (6:area)
int area = width * height;
features.push_back(static_cast<float>(area));

// route_ratio_x (7:route_ratio_x)
float route_ratio_x = (area > 0) ? static_cast<float>(width) / area : 0.0f;  // guard on area to avoid division by zero
features.push_back(route_ratio_x);

// route_ratio_y (8:route_ratio_y)
float route_ratio_y = (area > 0) ? static_cast<float>(height) / area : 0.0f;
features.push_back(route_ratio_y);

return features;
}

} // namespace ipl
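
The feature vector built by extractNetFeatures is purely positional (obtainNetWirelength indexes it with a bare 5). An illustrative index enum, not part of the patch, makes that layout explicit:

// Sketch only: mirrors the push_back order in AIWirelength::extractNetFeatures.
enum NetFeatureIndex : int {
  kWidth = 0,        // bounding-box width
  kHeight = 1,       // bounding-box height
  kPinNum = 2,       // number of pins on the net
  kAspectRatio = 3,  // width / height
  kLness = 4,        // currently a constant 0.5
  kRsmt = 5,         // HPWL stand-in scaled by the predicted ratio
  kArea = 6,         // width * height
  kRouteRatioX = 7,  // width / area
  kRouteRatioY = 8,  // height / area
  kNetFeatureCount = 9
};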

+ 78
- 0
src/operation/iPL/source/module/evaluator/wirelength/AIWirelength.hh View File

@@ -0,0 +1,78 @@
// ***************************************************************************************
// Copyright (c) 2023-2025 Peng Cheng Laboratory
// Copyright (c) 2023-2025 Institute of Computing Technology, Chinese Academy of Sciences
// Copyright (c) 2023-2025 Beijing Institute of Open Source Chip
//
// iEDA is licensed under Mulan PSL v2.
// You can use this software according to the terms and conditions of the Mulan PSL v2.
// You may obtain a copy of Mulan PSL v2 at:
// http://license.coscl.org.cn/MulanPSL2
//
// THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
// EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
// MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
//
// See the Mulan PSL v2 for more details.
// ***************************************************************************************
/*
* @Description: AI-based wirelength evaluator
 * @FilePath: /iEDA/src/operation/iPL/source/module/evaluator/wirelength/AIWirelength.hh
*/

#ifndef IPL_EVALUATOR_AI_WIRELENGTH_H
#define IPL_EVALUATOR_AI_WIRELENGTH_H

#include "Wirelength.hh"

#include <memory>

#include "wirelength_predictor.hh"


namespace ipl {

class AIWirelength : public Wirelength
{
public:
AIWirelength() = delete;
explicit AIWirelength(TopologyManager* topology_manager);
AIWirelength(const AIWirelength&) = delete;
AIWirelength(AIWirelength&&) = delete;
~AIWirelength() override = default;

AIWirelength& operator=(const AIWirelength&) = delete;
AIWirelength& operator=(AIWirelength&&) = delete;

// Load ONNX model for wirelength prediction
bool loadModel(const std::string& model_path);
bool loadNormalizationParams(const std::string& params_path);

// Check if model is loaded
bool isModelLoaded() const;

// Override virtual methods from Wirelength base class
int64_t obtainTotalWirelength() override;
int64_t obtainNetWirelength(int32_t net_id) override;
int64_t obtainPartOfNetWirelength(int32_t net_id, int32_t sink_pin_id) override;

// Extract features for a net
std::vector<float> extractNetFeatures(int32_t net_id);

private:
std::unique_ptr<WirelengthPredictor> _predictor;
bool _is_model_loaded = false;
};

inline AIWirelength::AIWirelength(TopologyManager* topology_manager) : Wirelength(topology_manager),
_predictor(std::make_unique<WirelengthPredictor>())
{
}

inline bool AIWirelength::isModelLoaded() const
{
return _is_model_loaded;
}

} // namespace ipl

#endif

+ 26
- 3
src/operation/iPL/source/module/evaluator/wirelength/CMakeLists.txt View File

@@ -1,18 +1,41 @@
add_library(ipl_module_evaluator_wirelength
set(WIRELENGTH_SOURCES
HPWirelength.cc
WAWirelengthGradient.cc
SteinerWirelength.cc
)

if(BUILD_AI_PREDICTOR)
list(APPEND WIRELENGTH_SOURCES AIWirelength.cc)
endif()

add_library(ipl_module_evaluator_wirelength ${WIRELENGTH_SOURCES})


target_link_libraries(ipl_module_evaluator_wirelength
PUBLIC
ipl-module-topology_manager
ipl-module-grid_manager
ipl-module-grid_manager
PRIVATE
ipl-module-logger
ipl-configurator
ipl-bridge
)

target_include_directories(ipl_module_evaluator_wirelength PUBLIC ${iPL_WIRELENGTH_EVALUATOR})
if(BUILD_AI_PREDICTOR)
target_link_libraries(ipl_module_evaluator_wirelength
PUBLIC
ipl-module-ai_predictor
)
endif()

target_include_directories(ipl_module_evaluator_wirelength
PUBLIC
${iPL_WIRELENGTH_EVALUATOR}
)

if(BUILD_AI_PREDICTOR)
target_include_directories(ipl_module_evaluator_wirelength
PUBLIC
${iPL_AI_PREDICTOR}/wirelength
)
endif()

+ 17
- 10
src/operation/iSTA/api/TimingIDBAdapter.cc View File

@@ -126,9 +126,8 @@ double TimingIDBAdapter::getResistance(int num_layer, double segment_length,
int routing_layer_id = num_layer - 1 + routing_layer_1st;
int routing_layer_size = routing_layers.size();

if (num_layer >= routing_layer_size ||
routing_layer_id >= routing_layer_size || num_layer < 0) {
LOG_FATAL << "Layer id error = " << num_layer;
if (routing_layer_id >= routing_layer_size || routing_layer_id < 0) {
LOG_FATAL << "Layer id error = " << routing_layer_id << " num layer = " << num_layer;
return 0;
}

@@ -166,19 +165,17 @@ double TimingIDBAdapter::getResistance(int num_layer, double segment_length,
* @return double cap unit is pf
*/
double TimingIDBAdapter::getCapacitance(int num_layer, double segment_length,
std::optional<double> segment_width) {
std::optional<double> segment_width, int routing_layer_1st) {
double segment_capacitance = 0;
IdbLayout* idb_layout = _idb_lef_service->get_layout();
vector<IdbLayer*>& routing_layers =
idb_layout->get_layers()->get_routing_layers();

int routing_layer_1st = 0; // dmInst->get_routing_layer_1st();
int routing_layer_id = num_layer - 1 + routing_layer_1st;
int routing_layer_size = routing_layers.size();

if (num_layer >= routing_layer_size ||
routing_layer_id >= routing_layer_size || num_layer < 0) {
LOG_FATAL << "Layer id error = " << num_layer;
if (routing_layer_id >= routing_layer_size || routing_layer_id < 0) {
LOG_FATAL << "Layer id error = " << routing_layer_id << " num layer = " << num_layer;
return 0;
}

@@ -743,7 +740,7 @@ unsigned TimingIDBAdapter::convertDBToTimingNetlist(bool link_all_cell) {
static_cast<double>(dbu);
double height = _idb_design->get_layout()->get_die()->get_height() /
static_cast<double>(dbu);
design_netlist.set_core_size(width, height);
design_netlist.set_die_size(width, height);

LOG_INFO << "die area width " << width << "um"
<< " height " << height << "um";
@@ -819,6 +816,11 @@ unsigned TimingIDBAdapter::convertDBToTimingNetlist(bool link_all_cell) {
sta_inst.addPin(cell_port_name.c_str(), library_port);
crossRef(inst_pin, db_inst_pin);

double pin_x = db_inst_pin->get_average_coordinate()->get_x() / static_cast<double>(dbu);
double pin_y = db_inst_pin->get_average_coordinate()->get_y() / static_cast<double>(dbu);

inst_pin->set_coordinate(pin_x, pin_y);

if (pin_bus) {
pin_bus->addPin(index.value(), inst_pin);
sta_inst.addPinBus(std::move(pin_bus));
@@ -835,7 +837,7 @@ unsigned TimingIDBAdapter::convertDBToTimingNetlist(bool link_all_cell) {
}
};

auto build_ports = [this, &design_netlist]() {
auto build_ports = [this, dbu, &design_netlist]() {
// build ports
auto db_ports = _idb_design->get_io_pin_list()->get_pin_list();
for (auto* db_port : db_ports) {
@@ -845,6 +847,11 @@ unsigned TimingIDBAdapter::convertDBToTimingNetlist(bool link_all_cell) {
Port sta_port(port_name.c_str(), io_type);
auto& created_port = design_netlist.addPort(std::move(sta_port));
crossRef(&created_port, db_port);

double port_x = db_port->get_average_coordinate()->get_x() / static_cast<double>(dbu);
double port_y = db_port->get_average_coordinate()->get_y() / static_cast<double>(dbu);

created_port.set_coordinate(port_x, port_y);  // set on the stored port; sta_port was moved from above
}
};



+ 1
- 1
src/operation/iSTA/api/TimingIDBAdapter.hh View File

@@ -108,7 +108,7 @@ class TimingIDBAdapter : public TimingDBAdapter {
double getResistance(int num_layer, double segment_length,
std::optional<double> segment_width, int routing_layer_1st = 0);
double getCapacitance(int num_layer, double segment_length,
std::optional<double> segment_width);
std::optional<double> segment_width, int routing_layer_1st = 0);
double getAverageResistance(std::optional<double>& segment_width);
double getAverageCapacitance(std::optional<double>& segment_width);


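With the new routing_layer_1st default argument on getCapacitance (matching getResistance), existing call sites keep compiling while new ones can pass the real first-routing-layer offset. A hedged call-site sketch; the adapter object, layer index, and namespace qualification are assumptions taken from the surrounding code.

// Sketch only: `adapter` is an already-constructed ista::TimingIDBAdapter.
#include <optional>

void estimateSegmentRC(ista::TimingIDBAdapter& adapter)
{
  int num_layer = 2;             // logical layer index used by the caller
  double segment_length = 10.0;  // segment length in um
  int routing_layer_1st = 1;     // e.g. the design's first routing layer id

  // Omitting routing_layer_1st keeps the old behavior (offset 0); segment width is optional.
  double res = adapter.getResistance(num_layer, segment_length, std::nullopt, routing_layer_1st);
  double cap_pf = adapter.getCapacitance(num_layer, segment_length, std::nullopt, routing_layer_1st);
  (void) res;
  (void) cap_pf;
}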

+ 24
- 0
src/operation/iSTA/source/module/delay/ElmoreDelayCalc.cc View File

@@ -1337,6 +1337,30 @@ std::optional<double> RcNet::delay(DesignObject& to, DelayMethod delay_method) {
return delay(to.getFullName().c_str());
}

/**
* @brief get node elmore delay.
*
* @param node_name
* @param mode
* @param trans_type
* @return std::optional<double>
*/
std::optional<double> RcNet::delay(const char* node_name, AnalysisMode mode,
TransType trans_type) {
if (_rct.index() == 0) {
return std::nullopt;
}

auto node = std::get<RcTree>(_rct).node(node_name);
std::optional<double> delay;
if (!node) {
return std::nullopt;
}
delay = node->delay(mode, trans_type);

return delay;
}

std::optional<std::pair<double, Eigen::MatrixXd>> RcNet::delay(
DesignObject& to, double /* from_slew */,
std::optional<LibCurrentData*> /* output_current */, AnalysisMode mode,


+ 2
- 0
src/operation/iSTA/source/module/delay/ElmoreDelayCalc.hh View File

@@ -688,6 +688,8 @@ class RcNet {
std::optional<double> delay(const char* node_name,
DelayMethod delay_method = DelayMethod::kElmore);

std::optional<double> delay(const char* node_name, AnalysisMode mode, TransType trans_type);

std::optional<double> delay(DesignObject& to,
DelayMethod delay_method = DelayMethod::kElmore);
std::optional<double> delayNs(DesignObject& to, DelayMethod delay_method) {

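The new overload returns std::nullopt when the net has no RC tree or the node name is unknown, which lets callers tell "no data" apart from a genuine zero delay. A minimal sketch, assuming an RC-annotated ista::RcNet obtained elsewhere:

// Sketch only: `rc_net` points to an RC-annotated ista::RcNet.
void reportNodeElmoreDelay(ista::RcNet* rc_net, const char* node_name)
{
  auto delay = rc_net->delay(node_name, ista::AnalysisMode::kMax, ista::TransType::kRise);
  if (delay) {
    LOG_INFO << node_name << " max-rise Elmore delay: " << *delay;
  } else {
    LOG_INFO << node_name << " has no RC tree node; skipping.";
  }
}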

+ 6
- 0
src/operation/iSTA/source/module/netlist/DesignObject.hh View File

@@ -26,6 +26,7 @@

#include <string>
#include <utility>
#include <optional>

#include "Type.hh"
#include "log/Log.hh"
@@ -47,6 +48,8 @@ class DesignObject {
DesignObject(DesignObject&& other) noexcept;
DesignObject& operator=(DesignObject&& rhs) noexcept;

using Coordinate = std::pair<double, double>;

virtual unsigned isNetlist() { return 0; }

virtual unsigned isPin() { return 0; }
@@ -112,6 +115,9 @@ class DesignObject {
return nullptr;
}

virtual void set_coordinate(double /*x*/, double /*y*/) { LOG_FATAL << "The func is not defined."; }
virtual std::optional<Coordinate> get_coordinate() { return std::nullopt; }

private:
std::string _name;
};


+ 2
- 4
src/operation/iSTA/source/module/netlist/Instance.hh View File

@@ -53,8 +53,6 @@ class Instance : public DesignObject {
friend PinIterator;
friend PinBusIterator;

using Coordinate = std::pair<double, double>;

unsigned isInstance() override { return 1; }

Pin* addPin(const char* name, LibPort* cell_port);
@@ -79,8 +77,8 @@ class Instance : public DesignObject {
return nullptr;
}

void set_coordinate(double x, double y) { _coordinate = {x, y}; }
auto get_coordinate() { return _coordinate; }
void set_coordinate(double x, double y) override { _coordinate = {x, y}; }
std::optional<Coordinate> get_coordinate() override { return _coordinate; }

private:
LibCell* _inst_cell;


+ 9
- 9
src/operation/iSTA/source/module/netlist/Netlist.hh View File

@@ -60,19 +60,19 @@ class Netlist : public DesignObject {
friend InstanceIterator;
friend NetIterator;

struct CoreSize {
struct DieSize {
double _width;
double _height;
};

unsigned isNetlist() override { return 1; }

auto get_core_size() { return _core_size; }
void set_core_size(double width, double height) {
CoreSize core_size;
core_size._width = width;
core_size._height = height;
_core_size = core_size;
auto get_die_size() { return _die_size; }
void set_die_size(double width, double height) {
DieSize die_size;
die_size._width = width;
die_size._height = height;
_die_size = die_size;
}

Port& addPort(Port&& port) {
@@ -197,8 +197,8 @@ class Netlist : public DesignObject {
std::list<Instance> _instances;
StrMap<Instance*> _str2instance;

std::optional<CoreSize>
_core_size; //!< The core size(width * weight) for FP.
std::optional<DieSize>
_die_size; //!< The die size (width * height) for FP.

FORBIDDEN_COPY(Netlist);
};


+ 5
- 0
src/operation/iSTA/source/module/netlist/Pin.hh View File

@@ -76,6 +76,9 @@ class Pin : public DesignObject {
void set_pin_bus(PinBus* pin_bus) { _pin_bus = pin_bus; }
auto* get_pin_bus() { return _pin_bus; }

void set_coordinate(double x, double y) override { _coordinate = {x, y}; }
std::optional<Coordinate> get_coordinate() override { return _coordinate; }

std::string getFullName() override;

private:
@@ -84,6 +87,8 @@ class Pin : public DesignObject {
Instance* _own_instance = nullptr; //!< The pin owned by the instance.
PinBus* _pin_bus = nullptr; //!< The pin owned by the pin bus.

std::optional<Coordinate> _coordinate; //!< The pin coordinate.

unsigned _is_VDD : 1; //!< The pin is at a constant logic value 1.
unsigned _is_GND : 1; //!< The pin is at a constant logic value 0.
unsigned _reserverd : 30;


+ 3
- 1
src/operation/iSTA/source/module/netlist/Port.cc View File

@@ -26,7 +26,9 @@
namespace ista {

Port::Port(const char* name, PortDir port_dir)
: DesignObject(name), _port_dir(port_dir), _net(nullptr) {}
: DesignObject(name), _port_dir(port_dir), _net(nullptr) {
set_cap(0.0);
}

Port::Port(Port&& other) noexcept
: DesignObject(std::move(other)),


+ 6
- 1
src/operation/iSTA/source/module/netlist/Port.hh View File

@@ -46,7 +46,7 @@ class Port : public DesignObject {
explicit Port(const char* name, PortDir port_dir);
Port(Port&& other) noexcept;
Port& operator=(Port&& rhs) noexcept;
~Port() override = default;
~Port() override = default;

unsigned isPort() override { return 1; }
unsigned isPin() override { return 0; }
@@ -72,6 +72,9 @@ class Port : public DesignObject {
void set_port_bus(PortBus* port_bus) { _port_bus = port_bus; }
auto* get_port_bus() { return _port_bus; }

void set_coordinate(double x, double y) override { _coordinate = {x, y}; }
std::optional<Coordinate> get_coordinate() override { return _coordinate; }

private:
std::map<ModeTransPair, double> _caps;
PortDir _port_dir; //!< The port direction.
@@ -79,6 +82,8 @@ class Port : public DesignObject {

PortBus* _port_bus = nullptr; //!< The port owned by the port bus.

std::optional<Coordinate> _coordinate; //!< The port coordinate.

FORBIDDEN_COPY(Port);
};
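
Because set_coordinate/get_coordinate are now virtual on DesignObject with overrides in Instance, Pin, and Port, coordinates can be consumed uniformly through the base pointer, as the graph dump code further below does. A small hedged sketch of that pattern:

// Sketch only: works for any DesignObject whose subclass stores a coordinate.
#include <cmath>

double manhattanDistance(ista::DesignObject* from, ista::DesignObject* to)
{
  auto from_coord = from->get_coordinate();
  auto to_coord = to->get_coordinate();
  if (!from_coord || !to_coord) {
    return 0.0;  // the base class returns std::nullopt when no coordinate is stored
  }
  return std::abs(from_coord->first - to_coord->first)
         + std::abs(from_coord->second - to_coord->second);
}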



+ 1
- 1
src/operation/iSTA/source/module/python-api/PythonSta.cc View File

@@ -38,7 +38,7 @@ PYBIND11_MODULE(ista_cpp, m) {
m.def("read_sdc", read_sdc, py::arg("file_name"));
m.def("report_timing", report_timing);

m.def("get_core_size", get_core_size);
m.def("get_die_size", get_die_size);
m.def("display_timing_map", display_timing_map);
m.def("display_timing_tns_map", display_timing_tns_map);
m.def("display_slew_map", display_slew_map);


+ 4
- 4
src/operation/iSTA/source/module/python-api/PythonSta.hh View File

@@ -139,11 +139,11 @@ bool report_timing() {
*
 * @return std::pair<double, double>
*/
std::pair<int, int> get_core_size() {
std::pair<double, double> get_die_size() {
auto* ista = ista::Sta::getOrCreateSta();
auto core_size = ista->get_netlist()->get_core_size();
if (core_size) {
return {(int)(core_size->_width), (int)(core_size->_height)};
auto die_size = ista->get_netlist()->get_die_size();
if (die_size) {
return {die_size->_width, die_size->_height};
}

return {0.0, 0.0};


+ 26
- 1
src/operation/iSTA/source/module/sta/Sta.cc View File

@@ -1903,6 +1903,10 @@ unsigned Sta::reportPath(const char *rpt_file_name, bool is_derate,
LOG_INFO << "\n" << _report_tbl_summary->c_str();
LOG_INFO << "\n" << _report_tbl_TNS->c_str();

Time::stop();
double elapsed_time = Time::elapsedTime();
LOG_INFO << "iSTA total elapsed time: " << elapsed_time << " seconds";

auto close_file = [](std::FILE *fp) { std::fclose(fp); };

std::unique_ptr<std::FILE, decltype(close_file)> f(
@@ -1910,6 +1914,7 @@ unsigned Sta::reportPath(const char *rpt_file_name, bool is_derate,

std::fprintf(f.get(), "Generate the report at %s, GitVersion: %s.\n",
Time::getNowWallTime(), GIT_VERSION);
std::fprintf(f.get(), "iSTA elapsed time: %.2f seconds.\n", elapsed_time);
std::fprintf(f.get(), "%s", _report_tbl_summary->c_str()); // WNS
// report_TNS;
std::fprintf(f.get(), "%s", _report_tbl_TNS->c_str());
@@ -1919,7 +1924,7 @@ unsigned Sta::reportPath(const char *rpt_file_name, bool is_derate,
}

if (isJsonReportEnabled()) {
nlohmann::json dump_json;
json dump_json;
dump_json["summary"] = _summary_json_report;
dump_json["slack"] = _slack_json_report;
dump_json["detail"] = _detail_json_report;
@@ -3079,6 +3084,26 @@ unsigned Sta::reportTiming(std::set<std::string> &&exclude_cell_names /*= {}*/,
// for test dump json data.
// reportWirePaths();

// for test dump graph json data.
if (0) {
json graph_json;
StaDumpGraphJson dump_graph_json(graph_json);
auto& the_graph = get_graph();
dump_graph_json(&the_graph);

std::string graph_json_file_name =
Str::printf("%s/%s_graph.json", design_work_space, get_design_name().c_str());

std::ofstream out_file(graph_json_file_name);
if (out_file.is_open()) {
out_file << graph_json.dump(4); // 4 spaces indent
LOG_INFO << "JSON report written to: " << graph_json_file_name;
out_file.close();
} else {
LOG_ERROR << "Failed to open JSON report file: " << graph_json_file_name;
}
}

#if CUDA_PROPAGATION
// printFlattenData();
#endif


+ 20
- 18
src/operation/iSTA/source/module/sta/Sta.hh View File

@@ -506,7 +506,8 @@ class Sta {
}
auto& get_report_spec() { return _report_spec; }

unsigned reportPath(const char* rpt_file_name, bool is_derate = true, bool only_wire_path = false);
unsigned reportPath(const char* rpt_file_name, bool is_derate = true,
bool only_wire_path = false);
unsigned reportTrans(const char* rpt_file_name);
unsigned reportCap(const char* rpt_file_name, bool is_clock_cap);
unsigned reportFanout(const char* rpt_file_name);
@@ -583,7 +584,8 @@ class Sta {
bool is_derate = false, bool is_clock_cap = false,
bool is_copy = true);

std::vector<StaPathWireTimingData> reportTimingData(unsigned n_worst_path_per_clock);
std::vector<StaPathWireTimingData> reportTimingData(
unsigned n_worst_path_per_clock);
unsigned reportUsedLibs();
unsigned reportWirePaths();

@@ -625,9 +627,9 @@ class Sta {

bool isJsonReportEnabled() const { return _is_json_report_enabled; }

nlohmann::json& getSummaryJsonReport() { return _summary_json_report; }
nlohmann::json& getSlackJsonReport() { return _slack_json_report; }
nlohmann::json& getDetailJsonReport() { return _detail_json_report; }
auto& getSummaryJsonReport() { return _summary_json_report; }
auto& getSlackJsonReport() { return _slack_json_report; }
auto& getDetailJsonReport() { return _detail_json_report; }

private:
Sta();
@@ -710,24 +712,24 @@ class Sta {
// Singleton sta.
static Sta* _sta;

bool _is_json_report_enabled = false; //!< The json report enable flag.
nlohmann::json _summary_json_report =
nlohmann::json::array(); //!< The json data
nlohmann::json _slack_json_report =
nlohmann::json::array(); //!< The json data
nlohmann::json _detail_json_report =
nlohmann::json::array(); //!< The json data for detailed report.
using json = nlohmann::ordered_json;
bool _is_json_report_enabled = true; //!< The json report enable flag.
json _summary_json_report = json::array(); //!< The json data
json _slack_json_report = json::array(); //!< The json data
json _detail_json_report =
json::array(); //!< The json data for detailed report.

#if CUDA_PROPAGATION
std::vector<GPU_Vertex> _gpu_vertices; //!< gpu flatten vertex, arc data.
std::vector<GPU_Arc> _gpu_arcs;
GPU_Flatten_Data _flatten_data;
GPU_Graph _gpu_graph; //!< The gpu graph mapped to sta graph.
std::vector<Lib_Arc_GPU> _lib_gpu_arcs; //!< The gpu lib arc data.
Lib_Data_GPU _gpu_lib_data; //!< The gpu lib arc data.
std::vector<Lib_Table_GPU> _lib_gpu_tables; //!< The gpu lib table data.
std::vector<Lib_Table_GPU*> _lib_gpu_table_ptrs; //!< The gpu lib table data.
std::map<StaArc*, unsigned> _arc_to_index; //!< The arc map to gpu index.
GPU_Graph _gpu_graph; //!< The gpu graph mapped to sta graph.
std::vector<Lib_Arc_GPU> _lib_gpu_arcs; //!< The gpu lib arc data.
Lib_Data_GPU _gpu_lib_data; //!< The gpu lib arc data.
std::vector<Lib_Table_GPU> _lib_gpu_tables; //!< The gpu lib table data.
std::vector<Lib_Table_GPU*> _lib_gpu_table_ptrs; //!< The gpu lib table data.
std::map<StaArc*, unsigned> _arc_to_index; //!< The arc map to gpu index.
std::map<StaPathDelayData*, unsigned>
_at_to_index; //!< The at map to gpu index.
std::map<unsigned, StaPathDelayData*>


+ 4
- 5
src/operation/iSTA/source/module/sta/StaArc.cc View File

@@ -65,7 +65,7 @@ int StaArc::get_arc_delay(AnalysisMode analysis_mode, TransType trans_type) {

/**
* @brief init arc delay data.
*
*
*/
void StaArc::initArcDelayData() {
auto& delay_bucket = getDataBucket();
@@ -128,10 +128,11 @@ StaNetArc::StaNetArc(StaVertex* driver, StaVertex* load, Net* net)
: StaArc(driver, load), _net(net) {}

StaInstArc::StaInstArc(StaVertex* src, StaVertex* snk, LibArc* lib_arc,
Instance* inst)
LibArcSet* lib_arc_set, Instance* inst)
: StaArc(src, snk),
_lib_arc(lib_arc),
_inst(inst){}
_lib_arc_set(lib_arc_set),
_inst(inst) {}

// for debug by printLIBTableGPU.(to be deleted)
void printLibTableGPU(const Lib_Table_GPU& gpu_table) {
@@ -174,6 +175,4 @@ void printLibTableGPU(const Lib_Table_GPU& gpu_table) {
std::cout << ");" << std::endl;
}



} // namespace ista

+ 5
- 3
src/operation/iSTA/source/module/sta/StaArc.hh View File

@@ -191,14 +191,15 @@ class StaNetArc : public StaArc {
*/
class StaInstArc : public StaArc {
public:
StaInstArc(StaVertex* src, StaVertex* snk, LibArc* lib_arc, Instance* inst);
StaInstArc(StaVertex* src, StaVertex* snk, LibArc* lib_arc,
LibArcSet* lib_arc_set, Instance* inst);
~StaInstArc() override = default;
// ~StaInstArc() override = default;

unsigned isInstArc() const override { return 1; }

LibArc* get_lib_arc() { return _lib_arc; }
void set_lib_arc(LibArc* lib_arc) { _lib_arc = lib_arc; }
auto* get_lib_arc_set() { return _lib_arc_set; }

unsigned isDelayArc() const override { return _lib_arc->isDelayArc(); }
unsigned isCheckArc() const override { return _lib_arc->isCheckArc(); }
@@ -245,11 +246,12 @@ class StaInstArc : public StaArc {

private:
LibArc* _lib_arc; //!< The mapped to lib arc.
LibArcSet* _lib_arc_set; //!< The mapped to lib arc set.
Instance* _inst; //!< The owned inst.

#if CUDA_PROPAGATION
Lib_Arc_GPU* _lib_gpu_arc = nullptr; //!< The gpu lib arc.
int _lib_arc_id = -1; //!< The arc id for gpu lib data.
int _lib_arc_id = -1; //!< The arc id for gpu lib data.
#endif

FORBIDDEN_COPY(StaInstArc);


+ 4
- 3
src/operation/iSTA/source/module/sta/StaBuildGraph.cc View File

@@ -96,7 +96,8 @@ unsigned StaBuildGraph::buildInst(StaGraph* the_graph, Instance* inst) {

// lambda function, build one inst arc.
auto build_inst_arc = [the_graph, inst](Pin* src_pin, Pin* snk_pin,
LibArc* cell_arc) {
LibArc* cell_arc,
LibArcSet* cell_arc_set) {
auto src_vertex = the_graph->findVertex(src_pin);
LOG_FATAL_IF(!src_vertex);

@@ -110,7 +111,7 @@ unsigned StaBuildGraph::buildInst(StaGraph* the_graph, Instance* inst) {
}

auto inst_arc =
std::make_unique<StaInstArc>(*src_vertex, *snk_vertex, cell_arc, inst);
std::make_unique<StaInstArc>(*src_vertex, *snk_vertex, cell_arc, cell_arc_set, inst);
(*src_vertex)->addSrcArc(inst_arc.get());
(*snk_vertex)->addSnkArc(inst_arc.get());

@@ -180,7 +181,7 @@ unsigned StaBuildGraph::buildInst(StaGraph* the_graph, Instance* inst) {
continue;
}

build_inst_arc(*src_pin, *snk_pin, cell_arc);
build_inst_arc(*src_pin, *snk_pin, cell_arc, cell_arc_set.get());
}
}
}


+ 9
- 2
src/operation/iSTA/source/module/sta/StaDataSlewDelayPropagation.cc View File

@@ -122,6 +122,7 @@ unsigned StaDataSlewDelayPropagation::operator()(StaArc* the_arc) {
if (the_arc->isInstArc()) {
auto* inst_arc = dynamic_cast<StaInstArc*>(the_arc);
auto* lib_arc = inst_arc->get_lib_arc();
auto* lib_arc_set = dynamic_cast<StaInstArc*>(the_arc)->get_lib_arc_set();
auto* the_lib = lib_arc->get_owner_cell()->get_owner_lib();

if (the_arc->isCheckArc()) {
@@ -142,8 +143,11 @@ unsigned StaDataSlewDelayPropagation::operator()(StaArc* the_arc) {
auto snk_slew_fs =
dynamic_cast<StaSlewData*>(snk_slew_data)->get_slew();
auto snk_slew = FS_TO_NS(snk_slew_fs);
auto delay_ns = lib_arc->getDelayOrConstrainCheckNs(
auto delay_values = lib_arc_set->getDelayOrConstrainCheckNs(
snk_trans_type, in_slew, snk_slew);
double delay_ns = analysis_mode == AnalysisMode::kMax
? delay_values.front()
: delay_values.back();
auto delay = NS_TO_FS(delay_ns);

StaArcDelayData* arc_delay = nullptr;
@@ -190,8 +194,11 @@ unsigned StaDataSlewDelayPropagation::operator()(StaArc* the_arc) {
auto output_current =
lib_arc->getOutputCurrent(out_trans_type, in_slew, load);

auto delay_ns = lib_arc->getDelayOrConstrainCheckNs(out_trans_type,
auto delay_values = lib_arc_set->getDelayOrConstrainCheckNs(out_trans_type,
in_slew, load);
double delay_ns = analysis_mode == AnalysisMode::kMax
? delay_values.front()
: delay_values.back();
auto delay = NS_TO_FS(delay_ns);

construct_slew_delay_data(analysis_mode, out_trans_type, snk_vertex,


+ 11
- 3
src/operation/iSTA/source/module/sta/StaDelayPropagation.cc View File

@@ -83,6 +83,7 @@ unsigned StaDelayPropagation::operator()(StaArc* the_arc) {

if (the_arc->isInstArc()) {
auto* lib_arc = dynamic_cast<StaInstArc*>(the_arc)->get_lib_arc();
auto* lib_arc_set = dynamic_cast<StaInstArc*>(the_arc)->get_lib_arc_set();
/*The check arc is the end of the recursion .*/
if (the_arc->isCheckArc()) {
// Since slew is fitter accord trigger type, May be do not need below
@@ -102,8 +103,11 @@ unsigned StaDelayPropagation::operator()(StaArc* the_arc) {
auto snk_slew_fs =
dynamic_cast<StaSlewData*>(snk_slew_data)->get_slew();
auto snk_slew = FS_TO_NS(snk_slew_fs);
auto delay_ns = lib_arc->getDelayOrConstrainCheckNs(
auto delay_values = lib_arc_set->getDelayOrConstrainCheckNs(
snk_trans_type, in_slew, snk_slew);
double delay_ns = analysis_mode == AnalysisMode::kMax
? delay_values.front()
: delay_values.back();
auto delay = NS_TO_FS(delay_ns);
construct_delay_data(analysis_mode, snk_trans_type, the_arc, delay);
}
@@ -130,9 +134,13 @@ unsigned StaDelayPropagation::operator()(StaArc* the_arc) {
if (!lib_arc->isMatchTimingType(out_trans_type)) {
continue;
}

auto delay_ns = lib_arc->getDelayOrConstrainCheckNs(out_trans_type,
// delay values are assumed to be sorted in descending order.
auto delay_values = lib_arc_set->getDelayOrConstrainCheckNs(out_trans_type,
in_slew, load);
double delay_ns = analysis_mode == AnalysisMode::kMax
? delay_values.front()
: delay_values.back();
auto delay = NS_TO_FS(delay_ns);

construct_delay_data(analysis_mode, out_trans_type, the_arc, delay);

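Both propagation passes above rely on the same convention: LibArcSet::getDelayOrConstrainCheckNs returns candidate delays sorted in descending order, so max analysis takes the front and min analysis takes the back. A compact restatement of that selection rule (the container type is assumed to be std::vector<double>):

// Sketch only: picks the pessimistic value per analysis mode from a descending-sorted list.
#include <vector>

double selectArcDelayNs(const std::vector<double>& delay_values, ista::AnalysisMode analysis_mode)
{
  // front() is the largest candidate delay, back() the smallest.
  return analysis_mode == ista::AnalysisMode::kMax ? delay_values.front() : delay_values.back();
}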

+ 512
- 0
src/operation/iSTA/source/module/sta/StaDump.cc View File

@@ -594,4 +594,516 @@ unsigned StaDumpTimingData::operator()(StaArc* the_arc) {
return 1;
}

/**
 * @brief dump the arc edge connections in json.
*
* @param the_graph
* @return StaDumpGraphJson::json
*/
StaDumpGraphJson::json StaDumpGraphJson::dumpEdges(StaGraph* the_graph) {
auto& the_vertexes = the_graph->get_vertexes();
std::map<StaVertex*, int> vertex_id_map;
int vertex_index = 0;
for (auto& the_vertex : the_vertexes) {
vertex_id_map[the_vertex.get()] = vertex_index++;
}

json edges;

auto& the_arcs = the_graph->get_arcs();
for (auto& the_arc : the_arcs) {
if (the_arc->isDelayArc()) {
int src_id = vertex_id_map[the_arc->get_src()];
int snk_id = vertex_id_map[the_arc->get_snk()];

if (the_arc->isInstArc() && the_arc->isDelayArc()) {
edges["cell_out"]["src"].push_back(src_id);
edges["cell_out"]["dst"].push_back(snk_id);
} else if (the_arc->isNetArc()) {
edges["net_out"]["src"].push_back(src_id);
edges["net_out"]["dst"].push_back(snk_id);

// the reversed direction is stored under net_in
edges["net_in"]["src"].push_back(snk_id);
edges["net_in"]["dst"].push_back(src_id);
}

}

}
return edges;
}

/**
 * @brief dump all node required arrival time (RAT) data in json.
*
* @param the_graph
* @return StaDumpGraphJson::json
*/
StaDumpGraphJson::json StaDumpGraphJson::dumpNodeRAT(StaGraph* the_graph) {
auto& the_vertexes = the_graph->get_vertexes();
const double inf = 1.1e20;
json all_vertex_rat_array = json::array();

for (auto& the_vertex : the_vertexes) {
json one_vertex_rat_array = json::array();

double max_rise_rat =
the_vertex->getReqTimeNs(AnalysisMode::kMax, TransType::kRise)
.value_or(inf);
double max_fall_rat =
the_vertex->getReqTimeNs(AnalysisMode::kMax, TransType::kFall)
.value_or(inf);
double min_rise_rat =
the_vertex->getReqTimeNs(AnalysisMode::kMin, TransType::kRise)
.value_or(inf);
double min_fall_rat =
the_vertex->getReqTimeNs(AnalysisMode::kMin, TransType::kFall)
.value_or(inf);
// min first
one_vertex_rat_array.push_back(min_rise_rat);
one_vertex_rat_array.push_back(min_fall_rat);
one_vertex_rat_array.push_back(max_rise_rat);
one_vertex_rat_array.push_back(max_fall_rat);

all_vertex_rat_array.push_back(one_vertex_rat_array);
}

return all_vertex_rat_array;
}

/**
* @brief dump node net delay data in json.
*
* @param the_graph
* @return StaDumpGraphJson::json
*/
StaDumpGraphJson::json StaDumpGraphJson::dumpNodeNetDelay(StaGraph* the_graph) {
auto& the_vertexes = the_graph->get_vertexes();
json all_vertex_node_net_delay_array = json::array();

for (auto& the_vertex : the_vertexes) {
json one_vertex_net_delay_array = json::array();

auto* the_obj = the_vertex->get_design_obj();
std::string obj_name = the_obj->getFullName();
auto* the_net = the_obj->get_net();
auto* rc_net = getSta()->getRcNet(the_net);
auto* rc_tree = rc_net->rct();

double max_rise_delay = 0.0;
double max_fall_delay = 0.0;
double min_rise_delay = 0.0;
double min_fall_delay = 0.0;
if (rc_tree) {
max_rise_delay = rc_tree->delay(obj_name.c_str(), AnalysisMode::kMax,
TransType::kRise);
max_fall_delay = rc_tree->delay(obj_name.c_str(), AnalysisMode::kMax,
TransType::kFall);
min_rise_delay = rc_tree->delay(obj_name.c_str(), AnalysisMode::kMin,
TransType::kRise);
min_fall_delay = rc_tree->delay(obj_name.c_str(), AnalysisMode::kMin,
TransType::kFall);
}

// min first
one_vertex_net_delay_array.push_back(min_rise_delay);
one_vertex_net_delay_array.push_back(min_fall_delay);
one_vertex_net_delay_array.push_back(max_rise_delay);
one_vertex_net_delay_array.push_back(max_fall_delay);

all_vertex_node_net_delay_array.push_back(one_vertex_net_delay_array);
}

return all_vertex_node_net_delay_array;
}

/**
 * @brief dump all node arrival time (AT) data in json.
*
* @param the_graph
* @return StaDumpGraphJson::json
*/
StaDumpGraphJson::json StaDumpGraphJson::dumpNodeAT(StaGraph* the_graph) {
auto& the_vertexes = the_graph->get_vertexes();
const double inf = 1.1e20;
json all_vertex_at_array = json::array();

for (auto& the_vertex : the_vertexes) {
json one_vertex_at_array = json::array();

double max_rise_at =
the_vertex->getArriveTimeNs(AnalysisMode::kMax, TransType::kRise)
.value_or(inf);
double max_fall_at =
the_vertex->getArriveTimeNs(AnalysisMode::kMax, TransType::kFall)
.value_or(inf);
double min_rise_at =
the_vertex->getArriveTimeNs(AnalysisMode::kMin, TransType::kRise)
.value_or(inf);
double min_fall_at =
the_vertex->getArriveTimeNs(AnalysisMode::kMin, TransType::kFall)
.value_or(inf);

// min first
one_vertex_at_array.push_back(min_rise_at);
one_vertex_at_array.push_back(min_fall_at);
one_vertex_at_array.push_back(max_rise_at);
one_vertex_at_array.push_back(max_fall_at);

all_vertex_at_array.push_back(one_vertex_at_array);
}

return all_vertex_at_array;
}

/**
* @brief dump all node slew data in json.
*
* @param the_graph
* @return StaDumpGraphJson::json
*/
StaDumpGraphJson::json StaDumpGraphJson::dumpNodeSlew(StaGraph* the_graph) {
auto& the_vertexes = the_graph->get_vertexes();
const double inf = 1.1e20;
json all_vertex_slew_array = json::array();

for (auto& the_vertex : the_vertexes) {
json one_vertex_slew_array = json::array();

double max_rise_slew =
the_vertex->getSlewNs(AnalysisMode::kMax, TransType::kRise)
.value_or(inf);
double max_fall_slew =
the_vertex->getSlewNs(AnalysisMode::kMax, TransType::kFall)
.value_or(inf);
double min_rise_slew =
the_vertex->getSlewNs(AnalysisMode::kMin, TransType::kRise)
.value_or(inf);
double min_fall_slew =
the_vertex->getSlewNs(AnalysisMode::kMin, TransType::kFall)
.value_or(inf);

// min first
one_vertex_slew_array.push_back(min_rise_slew);
one_vertex_slew_array.push_back(min_fall_slew);
one_vertex_slew_array.push_back(max_rise_slew);
one_vertex_slew_array.push_back(max_fall_slew);

all_vertex_slew_array.push_back(one_vertex_slew_array);
}

return all_vertex_slew_array;
}

/**
 * @brief dump the node feature data in json, including is_pin_port,
 * is_fanin_out (input or output), the distances to the 4 die corners, and the pin capacitance.
 * @ref Guo et al., DAC22 "A Timing Engine Inspired Graph Neural Network Model for
* Pre-Routing Slack Prediction"
*
* @param the_graph
* @return StaDumpGraphJson::json
*/
StaDumpGraphJson::json StaDumpGraphJson::dumpNodeFeature(StaGraph* the_graph) {
auto& the_vertexes = the_graph->get_vertexes();
json all_vertex_node_feature_array = json::array();
auto* nl = the_graph->get_nl();
auto [die_width, die_height] = nl->get_die_size().value();

for (auto& the_vertex : the_vertexes) {
json one_node_feature_array = json::array();
auto* the_obj = the_vertex->get_design_obj();
one_node_feature_array.push_back(the_obj->isPort() ? 1.0 : 0.0);   // is_port
one_node_feature_array.push_back(the_obj->isInput() ? 1.0 : 0.0);  // is_input
// the Manhattan distance to the 4 die corners, pushed below in the order lb, rt, rb, lt.
if (the_obj->get_coordinate()) {
auto [pin_x, pin_y] = the_obj->get_coordinate().value();
double left_bottom_distance = pin_x + pin_y;
double right_bottom_distance = die_width - pin_x + pin_y;
double left_top_distance = pin_x + die_height - pin_y;
double right_top_distance = die_width - pin_x + die_height - pin_y;

// the order is lb(left bottom), rt, rb, lt
one_node_feature_array.push_back(left_bottom_distance);
one_node_feature_array.push_back(right_top_distance);
one_node_feature_array.push_back(right_bottom_distance);
one_node_feature_array.push_back(left_top_distance);

} else {
// assume the non-pin node is in the left bottom of the die.
one_node_feature_array.push_back(0.0);
one_node_feature_array.push_back(die_width + die_height);
one_node_feature_array.push_back(die_width);
one_node_feature_array.push_back(die_height);
}

// TODO(to taosimin), min or max first? the values below are pushed min first, matching the other node tensors.
double max_rise_cap =
the_vertex->getLoad(AnalysisMode::kMax, TransType::kRise);
double max_fall_cap =
the_vertex->getLoad(AnalysisMode::kMax, TransType::kFall);
double min_rise_cap =
the_vertex->getLoad(AnalysisMode::kMin, TransType::kRise);
double min_fall_cap =
the_vertex->getLoad(AnalysisMode::kMin, TransType::kFall);

one_node_feature_array.push_back(min_rise_cap);
one_node_feature_array.push_back(min_fall_cap);

one_node_feature_array.push_back(max_rise_cap);
one_node_feature_array.push_back(max_fall_cap);

all_vertex_node_feature_array.push_back(one_node_feature_array);
}
return all_vertex_node_feature_array;
}

/**
* @brief dump the node is_end_point data in json.
*
* @param the_graph
* @return StaDumpGraphJson::json
*/
StaDumpGraphJson::json StaDumpGraphJson::dumpNodeIsEndPoint(
StaGraph* the_graph) {
auto& the_vertexes = the_graph->get_vertexes();
json all_vertex_node_is_ep_array = json::array();

for (auto& the_vertex : the_vertexes) {
all_vertex_node_is_ep_array.push_back(the_vertex->is_end() ? 1.0 : 0.0);
}
return all_vertex_node_is_ep_array;
}

/**
* @brief dump all instance arc delay data in json.
*
* @param the_graph
* @return StaDumpGraphJson::json
*/
StaDumpGraphJson::json StaDumpGraphJson::dumpInstArcDelay(StaGraph* the_graph) {
auto& the_arcs = the_graph->get_arcs();
json all_inst_arc_delay_array = json::array();

for (auto& the_arc : the_arcs) {
if (the_arc->isInstArc() && the_arc->isDelayArc()) {
json one_inst_arc_delay_array = json::array();

double max_rise_delay = FS_TO_NS(
the_arc->get_arc_delay(AnalysisMode::kMax, TransType::kRise));
double max_fall_delay = FS_TO_NS(
the_arc->get_arc_delay(AnalysisMode::kMax, TransType::kFall));
double min_rise_delay = FS_TO_NS(
the_arc->get_arc_delay(AnalysisMode::kMin, TransType::kRise));
double min_fall_delay = FS_TO_NS(
the_arc->get_arc_delay(AnalysisMode::kMin, TransType::kFall));

// min first
one_inst_arc_delay_array.push_back(min_rise_delay);
one_inst_arc_delay_array.push_back(min_fall_delay);
one_inst_arc_delay_array.push_back(max_rise_delay);
one_inst_arc_delay_array.push_back(max_fall_delay);

all_inst_arc_delay_array.push_back(one_inst_arc_delay_array);
}
}

return all_inst_arc_delay_array;
}

/**
* @brief dump inst arc lib table data in json.
 * @ref Guo et al., DAC22 "A Timing Engine Inspired Graph Neural Network Model for
* Pre-Routing Slack Prediction"
* @param the_graph
* @return StaDumpGraphJson::json
*/
StaDumpGraphJson::json StaDumpGraphJson::dumpInstArcFeature(
StaGraph* the_graph) {
auto& the_arcs = the_graph->get_arcs();
json all_inst_arc_lib_data_array = json::array();

for (auto& the_arc : the_arcs) {
if (the_arc->isInstArc() && the_arc->isDelayArc()) {
json one_inst_arc_table_array = json::array();

auto* the_lib_arc =
dynamic_cast<StaInstArc*>(the_arc.get())->get_lib_arc();
auto* delay_model =
dynamic_cast<LibDelayTableModel*>(the_lib_arc->get_table_model());
auto& delay_tables = delay_model->get_tables();

std::vector<LibTable*> store_tables;
int duplicate = 2; // hard-code 2 table duplicates for 2 corners
for (int i = 0; i < duplicate; ++i) {
for (auto& delay_table : delay_tables) {
store_tables.push_back(delay_table.get());
}
}

for (auto* delay_table : store_tables) {
// copy the table axes
if (delay_table) {
double is_valid = 1.0;
one_inst_arc_table_array.push_back(is_valid);
auto& table_axes = delay_table->get_axes();
for (auto& table_axis : table_axes) {
auto& axis_values = table_axis->get_axis_values();
for (auto& axis_value : axis_values) {
double data_value = axis_value->getFloatValue();
one_inst_arc_table_array.push_back(data_value);
}
}
} else {
double is_valid = 0.0;
one_inst_arc_table_array.push_back(is_valid);
// hard code 2 axes, 7*2 values
for (int i = 0; i < 7 * 2; i++) {
double data_value = 0.0;
one_inst_arc_table_array.push_back(data_value);
}
}
}

// copy table values (skip null tables, whose axes were already padded above)
for (auto* delay_table : store_tables) {
  if (!delay_table) {
    continue;
  }
  auto& table_values = delay_table->get_table_values();
  for (auto& table_value : table_values) {
    one_inst_arc_table_array.push_back(table_value->getFloatValue());
  }
}

all_inst_arc_lib_data_array.push_back(one_inst_arc_table_array);
}
}

return all_inst_arc_lib_data_array;
}

/**
 * @brief dump net arc feature: the x/y offsets between the positions of
 * a net's driver pin and its sink pin (driver -> sink).
 * @ref Guo et al., DAC22 "A Timing Engine Inspired Graph Neural Network Model for
* Pre-Routing Slack Prediction"
* @param the_graph
* @return StaDumpGraphJson::json
*/
StaDumpGraphJson::json StaDumpGraphJson::dumpNetInArcFeature(
StaGraph* the_graph) {
auto& the_arcs = the_graph->get_arcs();
json all_net_arc_feature_array = json::array();

for (auto& the_arc : the_arcs) {
if (the_arc->isNetArc()) {
json one_net_arc_feature_array = json::array();

auto* the_net_arc = dynamic_cast<StaNetArc*>(the_arc.get());
auto* src = the_net_arc->get_src();
auto* snk = the_net_arc->get_snk();

auto* src_obj = src->get_design_obj();
auto* snk_obj = snk->get_design_obj();

auto src_coord = src_obj->get_coordinate();
auto snk_coord = snk_obj->get_coordinate();

double distance_x = src_coord->first - snk_coord->first;
double distance_y = src_coord->second - snk_coord->second;

one_net_arc_feature_array.push_back(distance_x);
one_net_arc_feature_array.push_back(distance_y);

all_net_arc_feature_array.push_back(one_net_arc_feature_array);
}
}

return all_net_arc_feature_array;
}

/**
 * @brief The net_out edge is the reverse of the net arc, from sink -> driver.
*
* @param the_graph
* @return StaDumpGraphJson::json
*/
StaDumpGraphJson::json StaDumpGraphJson::dumpNetOutArcFeature(
StaGraph* the_graph) {
auto& the_arcs = the_graph->get_arcs();
json all_net_arc_feature_array = json::array();

for (auto& the_arc : the_arcs) {
if (the_arc->isNetArc()) {
json one_net_arc_feature_array = json::array();

auto* the_net_arc = dynamic_cast<StaNetArc*>(the_arc.get());
auto* src = the_net_arc->get_src();
auto* snk = the_net_arc->get_snk();

auto* src_obj = src->get_design_obj();
auto* snk_obj = snk->get_design_obj();

auto src_coord = src_obj->get_coordinate();
auto snk_coord = snk_obj->get_coordinate();

double distance_x = snk_coord->first - src_coord->first;
double distance_y = snk_coord->second - src_coord->second;

one_net_arc_feature_array.push_back(distance_x);
one_net_arc_feature_array.push_back(distance_y);

all_net_arc_feature_array.push_back(one_net_arc_feature_array);
}
}

return all_net_arc_feature_array;
}

/**
 * @brief dump the graph json to get graph timing data.
*
* @param the_graph
* @return unsigned
*/
unsigned StaDumpGraphJson::operator()(StaGraph* the_graph) {
LOG_INFO << "dump graph json start";

unsigned num_nodes = the_graph->numVertex();
_json_file["num_nodes"] = num_nodes;

_json_file["edges"] = dumpEdges(the_graph);

// dump node features
auto n_rats = dumpNodeRAT(the_graph);
auto n_net_delays = dumpNodeNetDelay(the_graph);
auto n_ats = dumpNodeAT(the_graph);
auto n_slews = dumpNodeSlew(the_graph);
auto n_node_features = dumpNodeFeature(the_graph);
auto n_is_timing_endpt = dumpNodeIsEndPoint(the_graph);

_json_file["node_features"]["n_rats"] = n_rats;
_json_file["node_features"]["n_net_delays"] = n_net_delays;
_json_file["node_features"]["n_ats"] = n_ats;
_json_file["node_features"]["n_slews"] = n_slews;
_json_file["node_features"]["nf"] = n_node_features;
_json_file["node_features"]["n_is_timing_endpt"] = n_is_timing_endpt;

// dump arc features
auto e_inst_arc_delays = dumpInstArcDelay(the_graph);
auto e_inst_arc_features = dumpInstArcFeature(the_graph);
auto e_net_in_arc_features = dumpNetInArcFeature(the_graph);
auto e_net_out_arc_features = dumpNetOutArcFeature(the_graph);

_json_file["edge_features"]["cell_out"]["e_cell_delays"] = e_inst_arc_delays;
_json_file["edge_features"]["cell_out"]["ef"] = e_inst_arc_features;
_json_file["edge_features"]["net_in"]["ef"] = e_net_in_arc_features;
_json_file["edge_features"]["net_out"]["ef"] = e_net_out_arc_features;

LOG_INFO << "dump graph json end";

return 1;
}

} // namespace ista

+ 31
- 0
src/operation/iSTA/source/module/sta/StaDump.hh View File

@@ -158,4 +158,35 @@ class StaDumpTimingData : public StaFunc {
TransType _trans_type;
};

/**
 * @brief dump the graph json to get graph timing data.
*
*/
class StaDumpGraphJson : public StaFunc {
public:
using json = nlohmann::ordered_json;
StaDumpGraphJson(json& json_file) : _json_file(json_file) {}
~StaDumpGraphJson() override = default;

unsigned operator()(StaGraph* the_graph) override;

private:
json dumpEdges(StaGraph* the_graph);

json dumpNodeRAT(StaGraph* the_graph);
json dumpNodeNetDelay(StaGraph* the_graph);
json dumpNodeAT(StaGraph* the_graph);
json dumpNodeSlew(StaGraph* the_graph);
json dumpNodeFeature(StaGraph* the_graph);
json dumpNodeIsEndPoint(StaGraph* the_graph);

json dumpInstArcDelay(StaGraph* the_graph);
json dumpInstArcFeature(StaGraph* the_graph);

json dumpNetInArcFeature(StaGraph* the_graph);
json dumpNetOutArcFeature(StaGraph* the_graph);

json& _json_file;
};

} // namespace ista
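
On the consumer side, the dumped graph file can be read back with nlohmann::json using the keys assembled in StaDumpGraphJson::operator(). A hedged sketch; the file name follows the <design>_graph.json pattern written in Sta.cc, and the include path for nlohmann/json may differ in the iEDA tree.

// Sketch only: reads a <design>_graph.json produced by the dump pass.
#include <fstream>
#include <iostream>

#include <nlohmann/json.hpp>

int main()
{
  std::ifstream in("top_graph.json");  // hypothetical design name "top"
  auto graph = nlohmann::ordered_json::parse(in);

  std::cout << "num_nodes: " << graph["num_nodes"] << "\n";
  std::cout << "cell_out edges: " << graph["edges"]["cell_out"]["src"].size() << "\n";

  // Per-node tensors are parallel arrays ordered [min_rise, min_fall, max_rise, max_fall].
  const auto& slews = graph["node_features"]["n_slews"];
  if (!slews.empty()) {
    std::cout << "node 0 slews: " << slews[0].dump() << "\n";
  }
  return 0;
}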

+ 13
- 14
src/operation/iSTA/source/module/sta/StaReport.cc View File

@@ -778,12 +778,12 @@ unsigned StaReportPathDetailJson::operator()(StaSeqPathData* seq_path_data) {
};

auto print_path_data = [&](auto& path_stack, auto clock_path_arrive_time,
nlohmann::json& path_json) {
json& path_json) {
double last_arrive_time = 0;
StaVertex* last_vertex = nullptr;

auto& detail_json = path_json["detail"];
auto& summary_json = path_json["summary"] = nlohmann::json::array();
auto& summary_json = path_json["summary"] = json::array();

// Helper function to extract module name from hierarchical vertex name
bool failed_extract_module_name = false;
@@ -866,7 +866,7 @@ unsigned StaReportPathDetailJson::operator()(StaSeqPathData* seq_path_data) {
// The arrive time
std::stack<StaPathDelayData*> path_stack = seq_path_data->getPathDelayData();

nlohmann::json path_json = nlohmann::json::object();
json path_json = json::object();

// Set the clock domain
auto* capture_clock = seq_path_data->get_capture_clock();
@@ -1055,17 +1055,17 @@ unsigned StaReportWirePathYaml::operator()(StaSeqPathData* seq_path_data) {
}

StaReportWirePathJson::StaReportWirePathJson(const char* rpt_file_name,
AnalysisMode analysis_mode,
unsigned n_worst)
AnalysisMode analysis_mode,
unsigned n_worst)
: StaReportPathDump(rpt_file_name, analysis_mode, n_worst) {}

/**
* @brief print timing path in json in wire level.
*
* @param seq_path_data
* @return unsigned
*
* @param seq_path_data
* @return unsigned
*/
unsigned StaReportWirePathJson::operator()(StaSeqPathData* seq_path_data) {
unsigned StaReportWirePathJson::operator()(StaSeqPathData* seq_path_data) {
// CPU_PROF_START(0);
std::string design_work_space =
ista::Sta::getOrCreateSta()->get_design_work_space();
@@ -1074,9 +1074,9 @@ unsigned StaReportWirePathJson::operator()(StaSeqPathData* seq_path_data) {
std::filesystem::create_directories(path_dir);

static unsigned file_id = 1;
std::string text_file_name = Str::printf(
"%s/wire_path_%d.json", path_dir.c_str(), file_id++);
std::string text_file_name =
Str::printf("%s/wire_path_%d.json", path_dir.c_str(), file_id++);
json path_json = json::array();
StaDumpWireJson dump_wire_json(path_json);
std::stack<StaPathDelayData*> path_stack = seq_path_data->getPathDelayData();
@@ -1105,12 +1105,11 @@ unsigned StaReportWirePathJson::operator()(StaSeqPathData* seq_path_data) {
file << path_json.dump(4) << std::endl;

file.close();
LOG_INFO << "output json file path: " << text_file_name;

// CPU_PROF_END(0, "dump one timing path wire yaml");
return 1;

}

StaReportPathTimingData::StaReportPathTimingData(const char* rpt_file_name,


+ 2
- 0
src/operation/iSTA/source/module/sta/StaReport.hh View File

@@ -143,6 +143,8 @@ class StaReportPathDetailJson : public StaReportPathDetail {
~StaReportPathDetailJson() override = default;

unsigned operator()(StaSeqPathData* seq_path_data) override;

using json = nlohmann::ordered_json;
};

/**


+ 1
- 1
src/operation/iSTA/source/module/sta/StaVertex.cc View File

@@ -842,7 +842,7 @@ double StaVertex::getLoad(AnalysisMode analysis_mode, TransType trans_type) {
load_or_cap = rc_net ? rc_net->load(analysis_mode, trans_type)
: the_net->getLoad(analysis_mode, trans_type);
} else {
load_or_cap = obj->cap();
load_or_cap = obj->cap(analysis_mode, trans_type);
}
return load_or_cap;
}


+20 -0 src/platform/tool_manager/tool_api/ipl_io/ipl_io.cpp

@@ -72,6 +72,26 @@ bool PlacerIO::runPlacement(std::string config, bool enableJsonOutput)
return true;
}

bool PlacerIO::runAiPlacement(std::string config, std::string onnx_path, std::string normalization_path)
{
if (!iPLAPIInst.isPlacerDBStarted()) {
this->initPlacer(config);
} else {
iPLAPIInst.updatePlacerDB();
}

ieda::Stats stats;
iPLAPIInst.runAiFlow(onnx_path, normalization_path);

flowConfigInst->add_status_runtime(stats.elapsedRunTime());
flowConfigInst->set_status_memmory(stats.memoryDelta());

// destroyPlacer();

return true;
}


////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////


+1 -0 src/platform/tool_manager/tool_api/ipl_io/ipl_io.h

@@ -70,6 +70,7 @@ class PlacerIO
void initPlacer(std::string config);
void destroyPlacer();
bool runPlacement(std::string config, bool enableJsonOutput = false);
bool runAiPlacement(std::string config, std::string onnx_path, std::string normalization_path);
bool runIncrementalLegalization();
bool runIncrementalLegalization(std::vector<std::string>& changed_inst_list);
bool runFillerInsertion(std::string config);


+6 -0 src/platform/tool_manager/tool_manager.cpp

@@ -217,6 +217,12 @@ bool ToolManager::reportPlacer()
return flag;
}

bool ToolManager::runAiPlacer(std::string config, std::string onnx_path, std::string normalization_path)
{
return plInst->runAiPlacement(config, onnx_path, normalization_path);
}


////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////


+1 -0 src/platform/tool_manager/tool_manager.h

@@ -85,6 +85,7 @@ class ToolManager
bool runPlacerIncrementalLegalization();
bool checkLegality();
bool reportPlacer();
bool runAiPlacer(std::string config = "", std::string onnx_path = "", std::string normalization_path="");

// iNO
bool RunNOFixFanout(std::string config = "");
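For reference, a minimal caller sketch for the new AI placement entry point declared above. Everything except runAiPlacer(config, onnx_path, normalization_path) is an assumption: the include path, the omitted namespace qualification, how the ToolManager instance is obtained, and all file paths are placeholders.

    // Hypothetical driver: run AI-assisted placement once the design has been
    // loaded into the platform database.
    #include <string>

    #include "tool_manager.h"

    bool run_ai_placement_demo(ToolManager& tool_manager)
    {
      const std::string pl_config = "./iEDA_config/pl_default_config.json";  // placeholder iPL config
      const std::string onnx_model = "./model/placement.onnx";               // placeholder ONNX model
      const std::string norm_file = "./model/normalization.json";            // placeholder normalization data
      // Forwards to PlacerIO::runAiPlacement(), which initializes or updates the
      // placer database and then runs iPLAPIInst.runAiFlow(onnx_path, normalization_path).
      return tool_manager.runAiPlacer(pl_config, onnx_model, norm_file);
    }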


+6 -0 src/third_party/CMakeLists.txt

@@ -1,4 +1,10 @@
add_compile_options(-w)

if(NOT BUILD_STATIC_LIB)
# When building shared libraries, ensure position independent code
set(CMAKE_POSITION_INDEPENDENT_CODE ON)
endif()

add_subdirectory(libfort)
add_subdirectory(LSAssigner4iEDA)
add_subdirectory(fft)


+17 -1 src/vectorization/api/vec_api.cpp

@@ -15,8 +15,9 @@
// See the Mulan PSL v2 for more details.
// ***************************************************************************************

#include "init_sta.hh"
#include "vec_api.h"

#include "init_sta.hh"
#include "vectorization.h"

namespace ivec {
@@ -66,4 +67,19 @@ bool VectorizationApi::runVecSTA(const std::string dir)

return true;
}

bool VectorizationApi::readVectorsNets(std::string nets_dir)
{
Vectorization vectorization;
vectorization.buildLayoutData();
return vectorization.readNetsToIDB(nets_dir);
}

bool VectorizationApi::readVectorsNetsPatterns(std::string path)
{
Vectorization vectorization;
vectorization.buildLayoutData();
return vectorization.readNetsPatternToIDB(path);
}

} // namespace ivec
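A short usage sketch for the two read-back entry points added above. The include path, direct construction of VectorizationApi, and the file locations are assumptions; only the two method names and their arguments come from the code.

    // Hypothetical example: write previously dumped vector nets, or a net-pattern
    // JSON file, back into IDB as routed wires.
    #include <string>

    #include "vec_api.h"

    void restore_routing_from_json()
    {
      ivec::VectorizationApi vec_api;

      // Scans the nets/ subfolder of the given dump directory, reading each
      // net_*.json batch and rebuilding wire segments for nets found by name.
      vec_api.readVectorsNets("./vectors/dump_dir");

      // Reads a single pattern file describing per-net edge lists.
      vec_api.readVectorsNetsPatterns("./vectors/pattern.json");
    }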

+3 -0 src/vectorization/api/vec_api.h

@@ -36,6 +36,9 @@ class VectorizationApi

std::map<int, VecNet> getGraph(std::string path = "");

bool readVectorsNets(std::string nets_dir);
bool readVectorsNetsPatterns(std::string path);

private:
};


+3 -0 src/vectorization/database/vec_net.h

@@ -158,6 +158,7 @@ class VecNet

// getter
int get_net_id() { return _net_id; }
std::string get_net_name() { return _net_name; }
std::vector<VecNetWire>& get_wires() { return _wires; }
std::vector<int>& get_pin_ids() { return _pin_ids; }
VecNetFeature* get_feature(bool b_create = false);
@@ -165,6 +166,7 @@ class VecNet
NetRoutingGraph get_routing_graph() { return _routing_graph; }
// setter
void set_net_id(int net_id) { _net_id = net_id; }
void set_net_name(std::string name) { _net_name = name; }
void set_routing_graph(const NetRoutingGraph& routing_graph) { _routing_graph = routing_graph; }

// operator
@@ -176,6 +178,7 @@ class VecNet

private:
int _net_id = -1;
std::string _net_name;
std::vector<VecNetWire> _wires;
std::vector<int> _pin_ids;
std::map<int, VecPin> _pin_list;


+10 -7 src/vectorization/database/vec_node.h

@@ -26,11 +26,11 @@
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#include <cstdint>
#include <map>
#include <set>
#include <string>
#include <vector>
#include <cstdint>

namespace ivec {

@@ -72,6 +72,7 @@ class VecNodeData
int32_t get_pdn_id() { return _pdn_id; }
int32_t get_pin_id() { return _pin_id; }
int32_t get_instance_id() { return _inst_id; }
int32_t get_via_id() { return _via_id; }
VecNodeTYpe get_type() { return _type; }
VecNodeConnectType get_connect_type() { return _connect_type; }
VecNodeFeature& get_feature() { return _feature; }
@@ -93,6 +94,7 @@ class VecNodeData
void set_pdn_id(int32_t id) { _pdn_id = id; }
void set_pin_id(int32_t id);
void set_instance_id(int32_t id) { _inst_id = id; }
void set_via_id(int32_t id) { _via_id = id; }
void set_type(VecNodeTYpe type);
void set_connect_type(VecNodeConnectType type);

@@ -103,6 +105,7 @@ class VecNodeData
int32_t _pdn_id = -1;
int32_t _pin_id = -1;
int32_t _inst_id = -1;
int32_t _via_id = -1;
VecNodeTYpe _type = VecNodeTYpe::kNone; /// multiple type in one node
VecNodeConnectType _connect_type = VecNodeConnectType::kNone;
VecNodeFeature _feature;
@@ -118,8 +121,8 @@ class VecNode
uint64_t get_node_id();
int64_t get_x();
int64_t get_y();
int64_t get_row_id() { return _row_id; }
int64_t get_col_id() { return _col_id; }
int get_row_id() { return _row_id; }
int get_col_id() { return _col_id; }
int16_t get_layer_id() { return _layer_id; }
int32_t get_realx() { return _real_x; }
int32_t get_realy() { return _real_y; }
@@ -127,8 +130,8 @@ class VecNode
VecNodeData* get_node_data(int net_id = -1, bool b_create = false);

// setter
void set_row_id(int64_t row_id) { _row_id = row_id; }
void set_col_id(int64_t col_id) { _col_id = col_id; }
void set_row_id(int row_id) { _row_id = row_id; }
void set_col_id(int col_id) { _col_id = col_id; }
void set_layer_id(int16_t layer_id) { _layer_id = layer_id; }
void set_real_coordinate(int32_t real_x, int32_t real_y)
{
@@ -139,8 +142,8 @@ class VecNode
// operator

private:
int64_t _row_id = -1; // node order of layer rows
int64_t _col_id = -1; // node order of layer cols
int _row_id = -1; // node order of layer rows
int _col_id = -1; // node order of layer cols
int16_t _layer_id = -1;
VecNodeData* _node_data = nullptr;
int32_t _real_x = -1;


+2 -0 src/vectorization/src/data_manager/CMakeLists.txt

@@ -11,9 +11,11 @@ target_link_libraries(ivec_dm
ivec_patch_dm
ivec_patch_db
ivec_util
idm
)

target_include_directories(ivec_dm
PUBLIC
${CMAKE_CURRENT_SOURCE_DIR}
${HOME_UTILITY}/json
)

+12 -0 src/vectorization/src/data_manager/vec_dm.cpp

@@ -84,4 +84,16 @@ void VecDataManager::saveData(const std::string dir)
file_io.saveJson();
}

bool VecDataManager::readNetsToIDB(std::string dir)
{
VecLayoutFileIO file_io(dir, &layout_dm.get_layout());
return file_io.readJsonNets();
}

bool VecDataManager::readNetsPatternToIDB(std::string path)
{
VecLayoutFileIO file_io(path, &layout_dm.get_layout());
return file_io.readJsonNetsPattern();
}

} // namespace ivec

+2 -0 src/vectorization/src/data_manager/vec_dm.h

@@ -44,6 +44,8 @@ class VecDataManager

bool checkData();
void saveData(const std::string dir);
bool readNetsToIDB(std::string dir);
bool readNetsPatternToIDB(std::string path);

public:
VecLayoutDataManager layout_dm;


+337 -12 src/vectorization/src/data_manager/vec_file.cpp

@@ -25,6 +25,7 @@

#include "Log.hh"
#include "idm.h"
#include "json_parser.h"
#include "omp.h"
#include "usage.hh"
#include "vec_grid_info.h"
@@ -41,9 +42,12 @@ void VecLayoutFileIO::makeDir(std::string dir)

bool VecLayoutFileIO::saveJson()
{
LOG_INFO << "Vectorization save json start... dir = " << _dir;
LOG_INFO << "Vectorization save json start... dir = " << _path;

makeDir(_dir);
makeDir(_path);

/// save tech
saveJsonTech();

/// save cells
saveJsonCells();
@@ -57,7 +61,7 @@ bool VecLayoutFileIO::saveJson()
/// save patch
saveJsonPatchs();

LOG_INFO << "Vectorization save json end... dir = " << _dir;
LOG_INFO << "Vectorization save json end... dir = " << _path;

return true;
}
@@ -66,7 +70,7 @@ bool VecLayoutFileIO::saveJsonNets()
{
ieda::Stats stats;
LOG_INFO << "Vectorization save json net start...";
makeDir(_dir + "/nets/");
makeDir(_path + "/nets/");

auto& net_map = _layout->get_graph().get_net_map();
const int BATCH_SIZE = 1500;  // batch size, adjustable according to system performance
@@ -103,7 +107,7 @@ bool VecLayoutFileIO::saveJsonNets()
json json_net;
{
json_net["id"] = net_id;
json_net["name"] = idb_net->get_net_name();
json_net["name"] = vec_net.get_net_name();

/// net feature
{
@@ -298,7 +302,7 @@ bool VecLayoutFileIO::saveJsonNets()

// file name format: net_START_END.json
std::string filename = "net_" + std::to_string(start_net_idx) + "_" + std::to_string(end_net_idx) + ".json";
std::string full_path = _dir + "/nets/" + filename;
std::string full_path = _path + "/nets/" + filename;

// build an array containing the nets of the current batch
json batch_json = json::array();
@@ -312,6 +316,7 @@ bool VecLayoutFileIO::saveJsonNets()
}

std::ofstream file_stream(full_path);
// file_stream << std::setw(4) << batch_json;
file_stream << batch_json;
file_stream.close();

@@ -334,7 +339,7 @@ bool VecLayoutFileIO::saveJsonPatchs()
{
ieda::Stats stats;
LOG_INFO << "Vectorization save json patchs start...";
makeDir(_dir + "/patchs/");
makeDir(_path + "/patchs/");

if (!_patch_grid) {
return false;
@@ -541,7 +546,7 @@ bool VecLayoutFileIO::saveJsonPatchs()

// file name format: patch_START_END.json
std::string filename = "patch_" + std::to_string(start_patch_idx) + "_" + std::to_string(end_patch_idx) + ".json";
std::string full_path = _dir + "/patchs/" + filename;
std::string full_path = _path + "/patchs/" + filename;

// build an array containing the patches of the current batch
json batch_json = json::array();
@@ -595,14 +600,78 @@ json VecLayoutFileIO::makeNodePair(VecNode* node1, VecNode* node2)
json_node["c2"] = node2->get_col_id(); /// col
json_node["l2"] = node2->get_layer_id(); /// layer order
json_node["p2"] = node2->get_node_data()->get_pin_id(); /// pin

// json_node["via"] = -1; /// via id

if (node1->get_layer_id() != node2->get_layer_id()) {
/// save via id
auto top_layer_node = node1->get_layer_id() > node2->get_layer_id() ? node1 : node2;
json_node["via"] = top_layer_node->get_node_data()->get_via_id();
}
return json_node;
}

bool VecLayoutFileIO::saveJsonTech()
{
ieda::Stats stats;
LOG_INFO << "Vectorization save json tech start...";
makeDir(_path + "/tech/");

json json_tech;
{
/// layers
{
auto& layer_map = _layout->get_layout_layers().get_layout_layer_map();
json_tech["layer_num"] = layer_map.size();

auto json_layer_list = json::array();
for (auto& [id, vec_layer] : layer_map) {
json json_layer;
json_layer["id"] = vec_layer.get_layer_order();
json_layer["name"] = vec_layer.get_layer_name();

json_layer_list.push_back(json_layer);
}

json_tech["layers"] = json_layer_list;
}

/// vias
{
auto& via_map = _layout->get_via_name_map();
json_tech["via_num"] = via_map.size();

auto json_via_list = json::array();
for (auto& [via_name, id] : via_map) {
json json_via;
json_via["id"] = id;
json_via["name"] = via_name;

json_via_list.push_back(json_via);
}

json_tech["vias"] = json_via_list;
}
}

std::string filename = _path + "/tech/tech.json";
std::ofstream file_stream(filename);
file_stream << json_tech;
// file_stream << std::setw(4) << json_tech;
file_stream.close();

LOG_INFO << "Vectorization memory usage " << stats.memoryDelta() << " MB";
LOG_INFO << "Vectorization elapsed time " << stats.elapsedRunTime() << " s";
LOG_INFO << "Vectorization save json tech end...";

return true;
}

bool VecLayoutFileIO::saveJsonCells()
{
ieda::Stats stats;
LOG_INFO << "Vectorization save json cells start...";
makeDir(_dir + "/cells/");
makeDir(_path + "/tech/");

json json_cells;
{
@@ -623,7 +692,7 @@ bool VecLayoutFileIO::saveJsonCells()
json_cells["cells"] = json_cell_list;
}

std::string filename = _dir + "/cells/cells.json";
std::string filename = _path + "/tech/cells.json";
std::ofstream file_stream(filename);
file_stream << json_cells;
file_stream.close();
@@ -639,7 +708,7 @@ bool VecLayoutFileIO::saveJsonInstances()
{
ieda::Stats stats;
LOG_INFO << "Vectorization save json instances start...";
makeDir(_dir + "/instances/");
makeDir(_path + "/instances/");

json json_insts;
{
@@ -667,7 +736,7 @@ bool VecLayoutFileIO::saveJsonInstances()
json_insts["instances"] = json_inst_list;
}

std::string filename = _dir + "/instances/instances.json";
std::string filename = _path + "/instances/instances.json";
std::ofstream file_stream(filename);
file_stream << json_insts;
file_stream.close();
@@ -679,4 +748,260 @@ bool VecLayoutFileIO::saveJsonInstances()
return true;
}

bool VecLayoutFileIO::readJsonNets()
{
namespace fs = std::filesystem;

auto find_json_files = [&](const fs::path& folder) -> std::vector<fs::path> {
std::vector<fs::path> result;
for (const auto& entry : fs::directory_iterator(folder)) {
if (entry.is_regular_file() && entry.path().extension() == ".json") {
result.emplace_back(entry.path());
}
}
return result;
};

auto* idb_nets = dmInst->get_idb_design()->get_net_list();
auto* idb_layers = dmInst->get_idb_layout()->get_layers();
auto* idb_vias = dmInst->get_idb_layout()->get_via_list();

idb_nets->clear_wire_list();

auto read_file = [&](std::string file) {
LOG_INFO << "read " << file;
nlohmann::json json;
std::ifstream file_stream(file);
file_stream >> json;

/// parse vector nets
for (auto& json_net : json.items()) {
std::string net_name = json_net.value()["name"];
auto* idb_net = idb_nets->find_net(net_name);
if (idb_net == nullptr) {
continue;
}

idb_net->clear_wire_list();
auto* idb_wire_list = idb_net->get_wire_list();

auto json_wires = json_net.value()["wires"];
auto* idb_wire = idb_wire_list->add_wire();
for (auto& json_wire : json_wires.items()) {
idb_wire->set_wire_state(idb::IdbWiringStatement::kRouted);

auto json_paths = json_wire.value()["paths"];
int32_t path_new = false;
for (auto& json_path : json_paths.items()) {
int p1 = json_path.value()["p1"];
int p2 = json_path.value()["p2"];
if (p1 != -1 && p2 != -1 && p1 == p2) {
/// ignore single pin shape
continue;
}

int x1 = json_path.value()["real_x1"];
int y1 = json_path.value()["real_y1"];
int l1 = json_path.value()["l1"];
std::string layer1 = _layout->findLayerName(l1);
auto* idb_layer1 = idb_layers->find_layer(layer1);
int x2 = json_path.value()["real_x2"];
int y2 = json_path.value()["real_y2"];
int l2 = json_path.value()["l2"];
std::string layer2 = _layout->findLayerName(l2);
auto* idb_layer2 = idb_layers->find_layer(layer2);

auto* idb_segment = new IdbRegularWireSegment();
if (l1 == l2) {
if (x1 == x2 && y1 == y2) {
delete idb_segment;
continue;
}

if (x1 == x2 || y1 == y2) {
} else {
/// use grid coordinate
x1 = json_path.value()["x1"];
y1 = json_path.value()["y1"];
x2 = json_path.value()["x2"];
y2 = json_path.value()["y2"];
}

idb_segment->set_layer(idb_layer1);
idb_segment->add_point(x1, y1);
idb_segment->add_point(x2, y2);

} else {
int via_id = json_path.value()["via"];
if (via_id == -1) {
LOG_WARNING << "via id error";
delete idb_segment;
continue;
}

auto* top_layer = l1 > l2 ? idb_layer1 : idb_layer2;
auto x = l1 > l2 ? x1 : x2;
auto y = l1 > l2 ? y1 : y2;
idb_segment->set_layer(top_layer);
idb_segment->set_is_via(true);
idb_segment->add_point(x, y);

auto via_name = _layout->findViaName(via_id);
if (via_name != "") {
auto* idb_via = idb_vias->find_via(via_name);
auto* idb_via_new = idb_segment->copy_via(idb_via);
idb_via_new->set_coordinate(x, y);
} else {
/// use default via, tbd
LOG_WARNING << "can not find via";
delete idb_segment;
continue;
}
}

if (path_new == false) {
idb_segment->set_layer_as_new();
path_new = true;
}

idb_wire->add_segment(idb_segment);
}
}
}

file_stream.close();
};

omp_lock_t lck;
omp_init_lock(&lck);

auto net_dir = _path + "/nets/";
#pragma omp parallel for schedule(dynamic)
for (auto& file : find_json_files(net_dir)) {
// omp_set_lock(&lck);

read_file(file);

// omp_unset_lock(&lck);
}

omp_destroy_lock(&lck);

LOG_INFO << "read nets success.";

return true;
}

bool VecLayoutFileIO::readJsonNetsPattern()
{
namespace fs = std::filesystem;

auto* idb_nets = dmInst->get_idb_design()->get_net_list();
auto* idb_layers = dmInst->get_idb_layout()->get_layers();
auto* idb_vias = dmInst->get_idb_design()->get_via_list();

idb_nets->clear_wire_list();

LOG_INFO << "read " << _path;
nlohmann::json json;
std::ifstream file_stream(_path);
file_stream >> json;

/// parse vector nets
auto json_nets = json["nets"];
for (auto& json_net : json_nets) {
std::string net_name = json_net["net_name"];
auto* idb_net = idb_nets->find_net(net_name);
if (idb_net == nullptr) {
std::cout << "can not find net " << net_name << std::endl;
continue;
}

idb_net->clear_wire_list();
auto* idb_wire_list = idb_net->get_wire_list();

auto json_edges = json_net["edges"];
auto* idb_wire = idb_wire_list->add_wire();

bool path_new = false;
for (auto& json_edge : json_edges.items()) {
idb_wire->set_wire_state(idb::IdbWiringStatement::kRouted);

auto json_start = json_edge.value()["start"];
int x_start = json_start["x"];
int y_start = json_start["y"];
int layer_index_start = json_start["layer"];

auto json_end = json_edge.value()["end"];
int x_end = json_end["x"];
int y_end = json_end["y"];
int layer_index_end = json_end["layer"];

if (layer_index_start == layer_index_end) {
if (x_start == x_end && y_start == y_end) {
std::cout << "same node " << x_start << " " << y_start << " , " << x_end << " " << y_end << std::endl;
continue;
}

if (x_start == x_end || y_start == y_end) {
} else {
std::cout << "illegal node " << x_start << " " << y_start << " , " << x_end << " " << y_end << std::endl;
continue;
}

std::string layer_metal = _layout->findLayerName(layer_index_start);
auto* idb_layer_metal = idb_layers->find_layer(layer_metal);

auto* idb_segment = idb_wire->add_segment();

idb_segment->set_layer(idb_layer_metal);
idb_segment->add_point(x_start, y_start);
idb_segment->add_point(x_end, y_end);

if (path_new == false) {
idb_segment->set_layer_as_new();
path_new = true;
}
} else {
auto top_order = std::max(layer_index_start, layer_index_end);
auto bottom_order = std::min(layer_index_start, layer_index_end);

for (auto layer_order = bottom_order; layer_order <= top_order; layer_order += 2) {
std::string bottom_layer_name = _layout->findLayerName(layer_order);
auto* bottom_layer = idb_layers->find_layer(bottom_layer_name);
std::string top_layer_name = _layout->findLayerName(layer_order + 2);
auto* top_layer = idb_layers->find_layer(top_layer_name);

auto* idb_segment = idb_wire->add_segment();
idb_segment->set_layer(top_layer);
idb_segment->set_is_via(true);
idb_segment->add_point(x_start, y_start);

/// create default via
auto cut_order = (bottom_layer->get_order() + top_layer->get_order()) / 2;
auto idb_cut_layer = (IdbLayerCut*) idb_layers->find_layer_by_order(cut_order);
auto via_name = idb_cut_layer->get_name() + "_vector_default";
auto* idb_via = idb_vias->find_via(via_name);
if (idb_via == nullptr) {
idb_via = idb_vias->createVia(via_name, idb_cut_layer);
}
auto* idb_via_new = idb_segment->copy_via(idb_via);
idb_via_new->set_coordinate(x_start, y_start);

if (path_new == false) {
idb_segment->set_layer_as_new();
path_new = true;
}
}
}
}
}

file_stream.close();

LOG_INFO << "read nets success.";

return true;
}

} // namespace ivec
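Judging from the parsing in readJsonNetsPattern() above, the pattern file is a single JSON object with a top-level "nets" array; each entry carries a "net_name" plus an "edges" list whose "start" and "end" nodes give coordinates and a routing-layer order. A sketch of that shape, with illustrative values only:

    {
      "nets": [
        {
          "net_name": "example_net",
          "edges": [
            { "start": { "x": 152000, "y": 98000, "layer": 2 },
              "end":   { "x": 171000, "y": 98000, "layer": 2 } },
            { "start": { "x": 171000, "y": 98000, "layer": 2 },
              "end":   { "x": 171000, "y": 98000, "layer": 4 } }
          ]
        }
      ]
    }

Same-layer edges must be axis-aligned (otherwise they are logged as illegal and skipped), and an edge that spans layers is expanded into one default "<cut_layer>_vector_default" via for every step of two in the layer order.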

+6 -3 src/vectorization/src/data_manager/vec_file.h

@@ -30,23 +30,26 @@ using json = nlohmann::ordered_json;
class VecLayoutFileIO
{
public:
VecLayoutFileIO(std::string dir, VecLayout* layout, VecPatchGrid* patch_grid = nullptr)
VecLayoutFileIO(std::string path, VecLayout* layout, VecPatchGrid* patch_grid = nullptr)
{
_dir = dir;
_path = path;
_layout = layout;
_patch_grid = patch_grid;
}
~VecLayoutFileIO() {}

bool saveJson();
bool readJsonNets();
bool readJsonNetsPattern();

private:
std::string _dir = "";
std::string _path = "";
VecLayout* _layout = nullptr;
VecPatchGrid* _patch_grid = nullptr;

bool saveJsonNets();
bool saveJsonPatchs();
bool saveJsonTech();
bool saveJsonCells();
bool saveJsonInstances();



+9 -2 src/vectorization/src/layout/data_manager/vec_layout_init.cpp

@@ -270,6 +270,8 @@ void VecLayoutInit::transVia(idb::IdbVia* idb_via, int net_id, VecNodeTYpe type)
node_data->set_pdn_id(net_id);
}
node_data->set_connect_type(VecNodeConnectType::vec_via);
auto via_id = _layout->findViaId(idb_via->get_name());
node_data->set_via_id(via_id);

/// bottom
auto enclosure_bottom = idb_via->get_bottom_layer_shape();
@@ -282,7 +284,7 @@ void VecLayoutInit::transVia(idb::IdbVia* idb_via, int net_id, VecNodeTYpe type)
auto enclosure_top = idb_via->get_top_layer_shape();
for (auto* rect : enclosure_top.get_rect_list()) {
transEnclosure(rect->get_low_x(), rect->get_low_y(), rect->get_high_x(), rect->get_high_y(), enclosure_top.get_layer()->get_name(),
net_id, row, col, type);
net_id, row, col, type, via_id);
}
}

@@ -633,7 +635,7 @@ void VecLayoutInit::transNetRect(int32_t ll_x, int32_t ll_y, int32_t ur_x, int32
}

void VecLayoutInit::transEnclosure(int32_t ll_x, int32_t ll_y, int32_t ur_x, int32_t ur_y, std::string layer_name, int net_id, int via_row,
int via_col, VecNodeTYpe type)
int via_col, VecNodeTYpe type, int via_id)
{
auto& layout_layers = _layout->get_layout_layers();

@@ -662,6 +664,10 @@ void VecLayoutInit::transEnclosure(int32_t ll_x, int32_t ll_y, int32_t ur_x, int
if (type == VecNodeTYpe::vec_pdn) {
node_data->set_pdn_id(net_id);
}

if (via_id != -1) {
node_data->set_via_id(via_id);
}
}
}
}
@@ -724,6 +730,7 @@ void VecLayoutInit::initNets()
if (vec_net == nullptr) {
continue;
}
vec_net->set_net_name(idb_net->get_net_name());

{
auto instance_name = driver_pin->is_io_pin() ? "" : driver_pin->get_instance()->get_name();


+1 -1 src/vectorization/src/layout/data_manager/vec_layout_init.h

@@ -54,7 +54,7 @@ class VecLayoutInit
void transPin(idb::IdbPin* idb_pin, int net_id, VecNodeTYpe type, int instance_id = -1, int pin_id = -1, bool b_io = false);
void transVia(idb::IdbVia* idb_via, int net_id, VecNodeTYpe type);
void transEnclosure(int32_t ll_x, int32_t ll_y, int32_t ur_x, int32_t ur_y, std::string layer_name, int net_id, int via_row, int via_col,
VecNodeTYpe type);
VecNodeTYpe type, int via_id = -1);
void transNetRect(int32_t ll_x, int32_t ll_y, int32_t ur_x, int32_t ur_y, std::string layer_name, int net_id, VecNodeTYpe type);
void transNetDelta(int32_t ll_x, int32_t ll_y, int32_t ur_x, int32_t ur_y, std::string layer_name, int net_id, VecNodeTYpe type);
};


+2 -1 src/vectorization/src/layout/database/vec_layout.h

@@ -22,9 +22,9 @@
* @description
*
*/
#include <cstdint>
#include <map>
#include <string>
#include <cstdint>

#include "vec_cell.h"
#include "vec_instance.h"
@@ -49,6 +49,7 @@ class VecLayout
VecGraph& get_graph() { return _graph; }
VecCells& get_cells() { return _cells; }
VecInstances& get_instances() { return _instances; }
std::map<std::string, int>& get_via_name_map() { return _via_name_map; }

// setter
void add_cell(VecCell cell);


+20 -0 src/vectorization/src/vectorization.cpp

@@ -141,4 +141,24 @@ bool Vectorization::buildPatchData(const std::string dir, int patch_row_step, in
return _data_manager.buildPatchData(dir, patch_row_step, patch_col_step);
}

bool Vectorization::readNetsToIDB(const std::string dir)
{
bool b_success = _data_manager.buildLayoutData();
if (b_success) {
b_success = _data_manager.readNetsToIDB(dir);
}

return b_success;
}

bool Vectorization::readNetsPatternToIDB(const std::string path)
{
bool b_success = _data_manager.buildLayoutData();
if (b_success) {
b_success = _data_manager.readNetsPatternToIDB(path);
}

return b_success;
}

} // namespace ivec

+3 -1 src/vectorization/src/vectorization.h

@@ -28,7 +28,7 @@ class Vectorization
Vectorization();
~Vectorization() {}

bool buildLayoutData(const std::string path);
bool buildLayoutData(const std::string path = "");
bool buildGraphData(const std::string path);
bool buildGraphDataWithoutSave(const std::string path);
std::map<int, VecNet> getGraph(std::string path);
@@ -37,6 +37,8 @@ class Vectorization
bool buildPatchData(const std::string dir, int patch_row_step, int patch_col_step);

bool runVecSTA(const std::string dir);
bool readNetsToIDB(const std::string dir);
bool readNetsPatternToIDB(const std::string path);

private:
VecDataManager _data_manager; /// top module data manager

