diff --git a/.coveragerc b/.coveragerc new file mode 100644 index 00000000..c6ac610c --- /dev/null +++ b/.coveragerc @@ -0,0 +1,3 @@ +[run] +branch = True +include = veriloggen/**/*.py diff --git a/conftest.py b/conftest.py index e61a0b5c..d1f5ff1f 100644 --- a/conftest.py +++ b/conftest.py @@ -1,8 +1,23 @@ -from __future__ import absolute_import -from __future__ import print_function - import pytest +from pathlib import Path def pytest_addoption(parser): parser.addoption('--sim', default='iverilog', help='Simulator') + + +@pytest.fixture(scope='session', autouse=True) +def clean(): + yield + for p in Path('.').glob('*.out'): + p.unlink() + for p in Path('.').glob('*.vcd'): + p.unlink() + for p in Path('.').glob('**/*.pyc'): + p.unlink() + for p in Path('.').glob('**/__pycache__'): + p.rmdir() + for p in Path('.').glob('**/parser.out'): + p.unlink() + for p in Path('.').glob('**/parsetab.py'): + p.unlink() diff --git a/pytest.ini b/pytest.ini index 7de65dde..4f06cb39 100644 --- a/pytest.ini +++ b/pytest.ini @@ -1,2 +1,3 @@ [pytest] -python_paths = ./ +pythonpath = . +testpaths = examples tests diff --git a/setup.py b/setup.py index c95efd6d..57abc646 100644 --- a/setup.py +++ b/setup.py @@ -23,9 +23,10 @@ def read(filename): 'veriloggen.simulation': ['*.cpp'], }, install_requires=['pyverilog>=1.3.0', - 'numpy>=1.17'], + 'numpy>=1.17', + 'rectpack>=0.2.2'], extras_require={ - 'test': ['pytest>=3.8.1', 'pytest-pythonpath>=0.7.3'], + 'test': ['pytest>=7.0.0'], 'graph': ['pygraphviz>=1.3.1'], }, ) diff --git a/tests/extension/asic_/check/correct.json b/tests/extension/asic_/check/correct.json new file mode 100644 index 00000000..263a6aab --- /dev/null +++ b/tests/extension/asic_/check/correct.json @@ -0,0 +1,14 @@ +{ + "DESIGN_NAME": "spm", + "VERILOG_FILES": "dir::src/*.v", + "CLOCK_PORT": "clk", + "CLOCK_PERIOD": 100, + "pdk::sky130A": { + "SYNTH_MAX_FANOUT": 6, + "FP_CORE_UTIL": 40, + "PL_TARGET_DENSITY": "expr::($FP_CORE_UTIL + 5.0) / 100.0", + "scl::sky130_fd_sc_hd": { + "CLOCK_PERIOD": 15 + } + } +} diff --git a/tests/extension/asic_/check/test_asic_check.py b/tests/extension/asic_/check/test_asic_check.py new file mode 100644 index 00000000..a6460208 --- /dev/null +++ b/tests/extension/asic_/check/test_asic_check.py @@ -0,0 +1,18 @@ +from pathlib import Path +import pytest + +from veriloggen.asic import check_file + + +files = ['correct.json', 'wrong1.json', 'wrong2.json'] + + +@pytest.mark.parametrize('fname', files) +def test(fname: str): + p = Path(__file__).parent / fname + try: + check_file(p) + except (TypeError, ValueError): + assert fname.startswith('wrong') + else: + assert fname.startswith('correct') diff --git a/tests/extension/asic_/check/wrong1.json b/tests/extension/asic_/check/wrong1.json new file mode 100644 index 00000000..a7bb0901 --- /dev/null +++ b/tests/extension/asic_/check/wrong1.json @@ -0,0 +1,14 @@ +{ + "DESIGN_NAME": "spm", + "VERILOG_FILE": "dir::src/*.v", + "CLOCK_PORT": "clk", + "CLOCK_PERIOD": 100, + "pdk::sky130A": { + "SYNTH_MAX_FANOUT": 6, + "FP_CORE_UTIL": 40, + "PL_TARGET_DENSITY": "expr::($FP_CORE_UTIL + 5.0) / 100.0", + "scl::sky130_fd_sc_hd": { + "CLOCK_PERIOD": 15 + } + } +} diff --git a/tests/extension/asic_/check/wrong2.json b/tests/extension/asic_/check/wrong2.json new file mode 100644 index 00000000..c1cda508 --- /dev/null +++ b/tests/extension/asic_/check/wrong2.json @@ -0,0 +1,12 @@ +{ + "DESIGN_NAME": "spm", + "VERILOG_FILES": "dir::src/*.v", + "CLOCK_PORT": "clk", + "CLOCK_PERIOD": 100, + "pdk::sky130A": { + "SYNTH_MAX_FANOUT": 6, + 
"FP_CORE_UTIL": 40, + "PL_TARGET_DENSITY": "expr::($FP_CORE_UTIL + 5.0) / 100.0", + "scl::sky130_fd_sc_hd": "dummy" + } +} diff --git a/tests/extension/asic_/conftest.py b/tests/extension/asic_/conftest.py new file mode 100644 index 00000000..a12e6804 --- /dev/null +++ b/tests/extension/asic_/conftest.py @@ -0,0 +1,25 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING +if TYPE_CHECKING: + from typing import Literal + +import subprocess +from pathlib import Path + +import pytest + + +@pytest.fixture(scope='session') +def simulation_model_path(): + subprocess.run('git clone https://github.com/VLSIDA/sky130_sram_macros.git', + shell=True, check=True) + subprocess.run('git clone https://github.com/google/globalfoundries-pdk-ip-gf180mcu_fd_ip_sram.git', + shell=True, check=True) + path_dict: dict[Literal['sky130', 'gf180mcu'], list[str]] = {} + path_dict['sky130'] = [str(p.absolute()) for p in Path('sky130_sram_macros').iterdir() if p.is_dir()] + path_dict['gf180mcu'] = [str(p.absolute()) for p in Path('globalfoundries-pdk-ip-gf180mcu_fd_ip_sram/cells').iterdir()] + yield path_dict + subprocess.run('rm -rf sky130_sram_macros', shell=True, check=True) + subprocess.run('rm -rf globalfoundries-pdk-ip-gf180mcu_fd_ip_sram', + shell=True, check=True) diff --git a/tests/extension/asic_/matmul_thread/asic_matmul_thread.py b/tests/extension/asic_/matmul_thread/asic_matmul_thread.py new file mode 100644 index 00000000..e92e7b3e --- /dev/null +++ b/tests/extension/asic_/matmul_thread/asic_matmul_thread.py @@ -0,0 +1,244 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING +if TYPE_CHECKING: + from typing import Literal + + +# --- to run without installation --- + +import sys +import os + +sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname( + os.path.dirname(os.path.dirname(os.path.abspath(__file__))))))) + +# --- to run without installation --- + + +import numpy as np + +from veriloggen import * +from veriloggen.asic import ASICSRAM, make_arr, asic_sim, generate_configs +from veriloggen import thread as vthread +from veriloggen.thread.uart import UartRx, UartTx + + +rand_seed = 123 +rand_range = (-16, 16) + + +def make_uut( + pdk: Literal['sky130', 'gf180mcu'], + matrix_size: int, + baudrate: int, + clockfreq: int, +) -> Module: + m = Module('asic_matmul_thread') + clk = m.Input('clk') + rst = m.Input('rst') + rxd = m.Input('rxd') + txd = m.Output('txd') + + uart_rx = UartRx(m, 'uart_rx', 'rx_', clk, rst, rxd, + baudrate=baudrate, clockfreq=clockfreq) + uart_tx = UartTx(m, 'uart_tx', 'tx_', clk, rst, txd, + baudrate=baudrate, clockfreq=clockfreq) + + addrwidth = (matrix_size*matrix_size - 1).bit_length() + ram_a = ASICSRAM(m, 'ram_a', clk, rst, 32, addrwidth, pdk=pdk) + ram_b = ASICSRAM(m, 'ram_b', clk, rst, 32, addrwidth, pdk=pdk) + + def comp(): + while True: + data = 0 # only to avoid undefined variable error + + # receive matrix A + for idx in range(matrix_size * matrix_size): + # big endian + data[24:32] = uart_rx.recv() + data[16:24] = uart_rx.recv() + data[8:16] = uart_rx.recv() + data[0:8] = uart_rx.recv() + ram_a.write(idx, data) + + # receive matrix B + for idx in range(matrix_size * matrix_size): + # big endian + data[24:32] = uart_rx.recv() + data[16:24] = uart_rx.recv() + data[8:16] = uart_rx.recv() + data[0:8] = uart_rx.recv() + ram_b.write(idx, data) + + # multiply matrix A and B, transmitting matrix C + a_idx = 0 + for i in range(matrix_size): + b_idx = 0 + for j in range(matrix_size): + # calculate inner product + w = 0 + for k 
in range(matrix_size): + x = ram_a.read(a_idx + k) + y = ram_b.read(b_idx + k) + z = x * y + w += z + + # transmit matrix C + # big endian + uart_tx.send(w[24:32]) + uart_tx.send(w[16:24]) + uart_tx.send(w[8:16]) + uart_tx.send(w[0:8]) + + b_idx += matrix_size + a_idx += matrix_size + + thd = vthread.Thread(m, 'thd', clk, rst, comp, datawidth=32) + thd.start() + + return m + + +def make_tb( + pdk: Literal['sky130', 'gf180mcu'], + matrix_size: int, + baudrate: int, + clockfreq: int, +): + m = Module('tb') + + uut = Submodule(m, make_uut(pdk, matrix_size, baudrate, clockfreq), + 'uut', as_wire=('rxd', 'txd')) + clk = uut['clk'] + rst = uut['rst'] + rxd = uut['rxd'] + txd = uut['txd'] + + uart_rx = UartRx(m, 'uart_rx', 'rx_', clk, rst, txd, + baudrate=baudrate, clockfreq=clockfreq) + uart_tx = UartTx(m, 'uart_tx', 'tx_', clk, rst, rxd, + baudrate=baudrate, clockfreq=clockfreq) + + clockperiod = 1_000_000_000 / clockfreq # in nanoseconds + simulation.setup_clock(m, clk, hperiod=clockperiod / 2) + init = simulation.setup_reset(m, rst, m.make_reset(), + period=clockperiod * 10) + + init.add( + Delay(100_000_000_000), + Finish() + ) + + rng = np.random.default_rng(rand_seed) + mat_a = rng.integers(*rand_range, (matrix_size, matrix_size), endpoint=True) + mat_b = rng.integers(*rand_range, (matrix_size, matrix_size), endpoint=True) + mat_c = mat_a @ mat_b + + a = make_arr(m, 'a', mat_a.ravel()) + b = make_arr(m, 'b', mat_b.T.ravel()) + c = make_arr(m, 'c', mat_c.ravel()) + + def test(): + ok = True + + # transmit matrix A + for i in range(matrix_size * matrix_size): + data = a[i] + uart_tx.send(data[24:32]) + uart_tx.send(data[16:24]) + uart_tx.send(data[8:16]) + uart_tx.send(data[0:8]) + + # transmit matrix B + for i in range(matrix_size * matrix_size): + data = b[i] + uart_tx.send(data[24:32]) + uart_tx.send(data[16:24]) + uart_tx.send(data[8:16]) + uart_tx.send(data[0:8]) + + # receive matrix C and check it + for i in range(matrix_size * matrix_size): + data[24:32] = uart_rx.recv() + data[16:24] = uart_rx.recv() + data[8:16] = uart_rx.recv() + data[0:8] = uart_rx.recv() + if data != c[i]: + ok = False + + if ok: + print('# verify: PASSED') + else: + print('# verify: FAILED') + + vthread.finish() + + thd = vthread.Thread(m, 'thd', clk, rst, test, datawidth=32) + thd.start() + + return m + + +def run( + pdk: Literal['sky130', 'gf180mcu'], + simulation_model_path: list[str], + matrix_size: int, + baudrate: int, + clockfreq: int, +) -> str: + """ + Simulate. + Used for testing (through `pytest`). + """ + tb = make_tb(pdk, matrix_size, baudrate, clockfreq) + rslt = asic_sim(tb, macro_model_path=simulation_model_path) + return rslt + + +def sim( + pdk: Literal['sky130', 'gf180mcu'], + pdk_root: str, + matrix_size: int, + baudrate: int, + clockfreq: int, +) -> str: + """ + Simulate. + Used for standalone execution (`if __name__ == '__main__':`). + """ + tb = make_tb(pdk, matrix_size, baudrate, clockfreq) + rslt = asic_sim(tb, pdk, pdk_root) + return rslt + + +def syn( + pdk: Literal['sky130', 'gf180mcu'], + pdk_root: str, + matrix_size: int, + baudrate: int, + clockfreq: int, + die_shape: tuple[int | float, int | float], +): + """ + Generate an HDL file and configuration files. + Used for standalone execution (`if __name__ == '__main__':`). 
+ """ + clockperiod = 1_000_000_000 / clockfreq + uut = make_uut(pdk, matrix_size, baudrate, clockfreq) + uut.to_verilog('asic_matmul_thread.v') + generate_configs('asic_matmul_thread.v', 'asic_matmul_thread', 'clk', + clockperiod, die_shape, pdk, pdk_root) + + +if __name__ == '__main__': + pdk = 'sky130' # sky130 or gf180mcu + pdk_root = '/Users/mu/research/google/OpenLane/pdks' # change this path + matrix_size = 15 + baudrate = 2_000_000 + clockfreq = 20_000_000 + die_shape = (1000, 1000) + + syn(pdk, pdk_root, matrix_size, baudrate, clockfreq, die_shape) + rslt = sim(pdk, pdk_root, matrix_size, baudrate, clockfreq) + print(rslt) diff --git a/tests/extension/asic_/matmul_thread/test_asic_matmul_thread.py b/tests/extension/asic_/matmul_thread/test_asic_matmul_thread.py new file mode 100644 index 00000000..c179ee3c --- /dev/null +++ b/tests/extension/asic_/matmul_thread/test_asic_matmul_thread.py @@ -0,0 +1,23 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING +if TYPE_CHECKING: + from typing import Literal + +import pytest + +import veriloggen +import asic_matmul_thread + + +@pytest.mark.parametrize('pdk', ['sky130', 'gf180mcu']) +def test( + pdk: Literal['sky130', 'gf180mcu'], + simulation_model_path: dict[Literal['sky130', 'gf180mcu'], list[str]], +): + veriloggen.reset() + rslt = asic_matmul_thread.run( + pdk, simulation_model_path[pdk], + matrix_size=15, baudrate=2_000_000, clockfreq=20_000_000) + verify_rslt = rslt.splitlines()[-1] + assert verify_rslt == '# verify: PASSED' diff --git a/tests/extension/asic_/sram/asic_sram.py b/tests/extension/asic_/sram/asic_sram.py new file mode 100644 index 00000000..057f2d4b --- /dev/null +++ b/tests/extension/asic_/sram/asic_sram.py @@ -0,0 +1,153 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING +if TYPE_CHECKING: + from typing import Literal + + +# --- to run without installation --- + +import sys +import os + +sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname( + os.path.dirname(os.path.dirname(os.path.abspath(__file__))))))) + +# --- to run without installation --- + + +from veriloggen import * +from veriloggen.asic import ASICSRAM, asic_sim, generate_configs +from veriloggen.thread import Thread + + +def make_uut(pdk, datawidth, addrwidth): + iter_num = 2**addrwidth + + m = Module('asic_sram') + clk = m.Input('clk') + rst = m.Input('rst') + ok = m.OutputReg('ok', initval=0) + + ram = ASICSRAM(m, 'ram', clk, rst, datawidth, addrwidth, pdk=pdk) + + def comp(): + ok_flag = True + + write_sum = 0 + for i in range(iter_num): + # `i[:4]` rather than `i` is used to avoid overflow + write_data = i[:4] + ram.write(i, write_data) + write_sum += write_data + + read_sum = 0 + for i in range(iter_num): + read_data = ram.read(i) + read_sum += read_data + # `i[:4]` rather than `i` is used to avoid overflow + if read_data != i[:4]: + ok_flag = False + + if read_sum != write_sum: + ok_flag = False + + if ok_flag: + ok.value = 1 + + # `datawidth=32` rather than `datawidth=datawidth` + # is used to avoid overflow + thd = Thread(m, 'thd', clk, rst, comp, datawidth=32) + thd.start() + + return m + + +def make_tb(pdk, datawidth, addrwidth, clock_period, simulation_time): + m = Module('tb') + + uut = Submodule(m, make_uut(pdk, datawidth, addrwidth), 'uut') + clk = uut['clk'] + rst = uut['rst'] + ok = uut['ok'] + + simulation.setup_clock(m, clk, hperiod=clock_period / 2) + init = simulation.setup_reset(m, rst, m.make_reset(), + period=clock_period * 10) + init.add( + Delay(simulation_time), + If(ok)( 
+ Display('# verify: PASSED') + ).Else( + Display('# verify: FAILED') + ), + Finish(), + ) + + return m + + +def run( + pdk: Literal['sky130', 'gf180mcu'], + simulation_model_path: list[str], + datawidth: int, + addrwidth: int, + clock_period: int | float, + simulation_time: int, +) -> str: + """ + Simulate. + Used for testing (through `pytest`). + """ + tb = make_tb(pdk, datawidth, addrwidth, clock_period, simulation_time) + rslt = asic_sim(tb, macro_model_path=simulation_model_path) + return rslt + + +def sim( + pdk: Literal['sky130', 'gf180mcu'], + pdk_root: str, + datawidth: int, + addrwidth: int, + clock_period: int | float, + simulation_time: int, +) -> str: + """ + Simulate. + Used for standalone execution (`if __name__ == '__main__':`). + """ + tb = make_tb(pdk, datawidth, addrwidth, clock_period, simulation_time) + rslt = asic_sim(tb, pdk, pdk_root) + return rslt + + +def syn( + pdk: Literal['sky130', 'gf180mcu'], + pdk_root: str, + datawidth: int, + addrwidth: int, + clock_period: int | float, + die_shape: tuple[int | float, int | float], +): + """ + Generate an HDL file and configuration files. + Used for standalone execution (`if __name__ == '__main__':`). + """ + make_uut(pdk, datawidth, addrwidth).to_verilog('asic_sram.v') + generate_configs('asic_sram.v', 'asic_sram', 'clk', + clock_period, die_shape, pdk, pdk_root) + + +if __name__ == '__main__': + pdk = 'sky130' # sky130 or gf180mcu + pdk_root = '/Users/mu/research/google/OpenLane/pdks' # change this path + datawidth = 16 + addrwidth = 8 + clock_period = 50 # in nanoseconds + simulation_time = 10_000_000 # in nanoseconds + die_shape = (1000, 1000) + + syn(pdk, pdk_root, datawidth, addrwidth, clock_period, die_shape) + rslt = sim(pdk, pdk_root, datawidth, addrwidth, + clock_period, simulation_time) + print(rslt) diff --git a/tests/extension/asic_/sram/test_asic_sram.py b/tests/extension/asic_/sram/test_asic_sram.py new file mode 100644 index 00000000..ef4d068e --- /dev/null +++ b/tests/extension/asic_/sram/test_asic_sram.py @@ -0,0 +1,26 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING +if TYPE_CHECKING: + from typing import Literal + +import pytest + +import veriloggen +import asic_sram + + +@pytest.mark.parametrize('pdk', ['sky130', 'gf180mcu']) +@pytest.mark.parametrize('datawidth', [8, 16, 32]) +@pytest.mark.parametrize('addrwidth', list(range(6, 10))) +def test( + pdk: Literal['sky130', 'gf180mcu'], + datawidth: int, + addrwidth: int, + simulation_model_path: dict[Literal['sky130', 'gf180mcu'], list[str]], +): + veriloggen.reset() + rslt = asic_sram.run(pdk, simulation_model_path[pdk], datawidth, addrwidth, + clock_period=50, simulation_time=1_000_000) + verify_rslt = rslt.splitlines()[-1] + assert verify_rslt == '# verify: PASSED' diff --git a/veriloggen/asic/__init__.py b/veriloggen/asic/__init__.py new file mode 100644 index 00000000..36eadffa --- /dev/null +++ b/veriloggen/asic/__init__.py @@ -0,0 +1,5 @@ +from .check import check_data, check_file +from .macro import generate_configs +from .simulation import asic_sim +from .sram import ASICSRAM +from .util import make_arr diff --git a/veriloggen/asic/check.py b/veriloggen/asic/check.py new file mode 100644 index 00000000..18d17dbc --- /dev/null +++ b/veriloggen/asic/check.py @@ -0,0 +1,265 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING +if TYPE_CHECKING: + from typing import Any + from os import PathLike + +import json + + +valid_vars = [ + 'DESIGN_NAME', + 'VERILOG_FILES', + 'CLOCK_PERIOD', + 'CLOCK_NET', + 
'CLOCK_PORT', + 'PDK', + 'STD_CELL_LIBRARY', + 'STD_CELL_LIBRARY_OPT', + 'PDK_ROOT', + 'DIODE_PADDING', + 'MERGED_LEF', + 'NO_SYNTH_CELL_LIST', + 'DRC_EXCLUDE_CELL_LIST', + 'VERILOG_FILES_BLACKBOX', + 'EXTRA_LEFS', + 'EXTRA_LIBS', + 'EXTRA_GDS_FILES', + 'SYNTH_AUTONAME', + 'SYNTH_BIN', + 'SYNTH_CAP_LOAD', + 'SYNTH_MAX_FANOUT', + 'SYNTH_MAX_TRAN', + 'SYNTH_CLOCK_UNCERTAINTY', + 'SYNTH_CLOCK_TRANSITION', + 'SYNTH_TIMING_DERATE', + 'SYNTH_STRATEGY', + 'SYNTH_BUFFERING', + 'SYNTH_SIZING', + 'SYNTH_READ_BLACKBOX_LIB', + 'SYNTH_NO_FLAT', + 'SYNTH_SHARE_RESOURCES', + 'SYNTH_ADDER_TYPE', + 'SYNTH_EXTRA_MAPPING_FILE', + 'SYNTH_PARAMETERS', + 'SYNTH_ELABORATE_ONLY', + 'CLOCK_BUFFER_FANOUT', + 'BASE_SDC_FILE', + 'VERILOG_INCLUDE_DIRS', + 'SYNTH_FLAT_TOP', + 'IO_PCT', + 'STA_WRITE_LIB', + 'FP_CORE_UTIL', + 'FP_ASPECT_RATIO', + 'FP_SIZING', + 'DIE_AREA', + 'CORE_AREA', + 'FP_IO_MODE', + 'FP_WELLTAP_CELL', + 'FP_ENDCAP_CELL', + 'FP_PDN_VOFFSET', + 'FP_PDN_VPITCH', + 'FP_PDN_HOFFSET', + 'FP_PDN_HPITCH', + 'FP_PDN_AUTO_ADJUST', + 'FP_PDN_SKIPTRIM', + 'FP_TAPCELL_DIST', + 'FP_IO_VEXTEND', + 'FP_IO_HEXTEND', + 'FP_IO_VLENGTH', + 'FP_IO_HLENGTH', + 'FP_IO_VTHICKNESS_MULT', + 'FP_IO_HTHICKNESS_MULT', + 'FP_IO_UNMATCHED_ERROR', + 'BOTTOM_MARGIN_MULT', + 'TOP_MARGIN_MULT', + 'LEFT_MARGIN_MULT', + 'RIGHT_MARGIN_MULT', + 'FP_PDN_CORE_RING', + 'FP_PDN_ENABLE_RAILS', + 'FP_PDN_ENABLE_MACROS_GRID', + 'FP_PDN_MACRO_HOOKS', + 'FP_PDN_CHECK_NODES', + 'FP_TAP_HORIZONTAL_HALO', + 'FP_TAP_VERTICAL_HALO', + 'FP_PDN_HORIZONTAL_HALO', + 'FP_PDN_VERTICAL_HALO', + 'DESIGN_IS_CORE', + 'FP_PIN_ORDER_CFG', + 'FP_CONTEXT_DEF', + 'FP_CONTEXT_LEF', + 'FP_DEF_TEMPLATE', + 'VDD_NETS', + 'GND_NETS', + 'SYNTH_USE_PG_PINS_DEFINES', + 'FP_IO_MIN_DISTANCE', + 'FP_PADFRAME_CFG', + 'FP_PDN_IRDROP', + 'FP_IO_HMETAL', + 'FP_IO_VMETAL', + 'RSZ_LIB', + 'RSZ_DONT_TOUCH_RX', + 'LIB_RESIZER_OPT', + 'PL_TARGET_DENSITY', + 'PL_TIME_DRIVEN', + 'PL_BASIC_PLACEMENT', + 'PL_SKIP_INITIAL_PLACEMENT', + 'PL_RANDOM_GLB_PLACEMENT', + 'PL_RANDOM_INITIAL_PLACEMENT', + 'PL_ROUTABILITY_DRIVEN', + 'PL_RESIZER_DESIGN_OPTIMIZATIONS', + 'PL_RESIZER_TIMING_OPTIMIZATIONS', + 'PL_RESIZER_MAX_WIRE_LENGTH', + 'PL_RESIZER_MAX_SLEW_MARGIN', + 'PL_RESIZER_MAX_CAP_MARGIN', + 'PL_RESIZER_HOLD_SLACK_MARGIN', + 'PL_RESIZER_SETUP_SLACK_MARGIN', + 'PL_RESIZER_HOLD_MAX_BUFFER_PERCENT', + 'PL_RESIZER_SETUP_MAX_BUFFER_PERCENT', + 'PL_RESIZER_ALLOW_SETUP_VIOS', + 'DONT_USE_CELLS', + 'PL_ESTIMATE_PARASITICS', + 'PL_OPTIMIZE_MIRRORING', + 'PL_RESIZER_BUFFER_INPUT_PORTS', + 'PL_RESIZER_BUFFER_OUTPUT_PORTS', + 'PL_RESIZER_REPAIR_TIE_FANOUT', + 'PL_MAX_DISPLACEMENT_X', + 'PL_MAX_DISPLACEMENT_Y', + 'PL_MACRO_HALO', + 'PL_MACRO_CHANNEL', + 'MACRO_PLACEMENT_CFG', + 'UNBUFFER_NETS', + 'DONT_BUFFER_PORTS', + 'CTS_TARGET_SKEW', + 'CLOCK_TREE_SYNTH', + 'CTS_TOLERANCE', + 'CTS_SINK_CLUSTERING_SIZE', + 'CTS_SINK_CLUSTERING_MAX_DIAMETER', + 'CTS_REPORT_TIMING', + 'CTS_CLK_MAX_WIRE_LENGTH', + 'CTS_DISABLE_POST_PROCESSING', + 'CTS_DISTANCE_BETWEEN_BUFFERS', + 'LIB_CTS', + 'FILL_INSERTION', + 'RUN_SIMPLE_CTS', + 'GLOBAL_ROUTER', + 'DETAILED_ROUTER', + 'ROUTING_CORES', + 'RT_CLOCK_MIN_LAYER', + 'RT_CLOCK_MAX_LAYER', + 'GLB_RESIZER_TIMING_OPTIMIZATIONS', + 'GLB_RESIZER_MAX_WIRE_LENGTH', + 'GLB_RESIZER_MAX_SLEW_MARGIN', + 'GLB_RESIZER_MAX_CAP_MARGIN', + 'GLB_RESIZER_HOLD_SLACK_MARGIN', + 'GLB_RESIZER_SETUP_SLACK_MARGIN', + 'GLB_RESIZER_HOLD_MAX_BUFFER_PERCENT', + 'GLB_RESIZER_SETUP_MAX_BUFFER_PERCENT', + 'GLB_RESIZER_ALLOW_SETUP_VIOS', + 'GLB_OPTIMIZE_MIRRORING', + 'GRT_ALLOW_CONGESTION', + 
'GRT_OVERFLOW_ITERS', + 'GRT_ANT_ITERS', + 'GRT_ESTIMATE_PARASITICS', + 'GRT_MAX_DIODE_INS_ITERS', + 'GRT_OBS', + 'GRT_ADJUSTMENT', + 'GRT_MACRO_EXTENSION', + 'DRT_MIN_LAYER', + 'DRT_MAX_LAYER', + 'DRT_OPT_ITERS', + 'ROUTING_OPT_ITERS', + 'GLB_RT_MINLAYER', + 'GLB_RT_MAXLAYER', + 'GLB_RT_CLOCK_MINLAYER', + 'GLB_RT_CLOCK_MAXLAYER', + 'GLB_RT_UNIDIRECTIONAL', + 'GLB_RT_TILES', + 'SPEF_EXTRACTOR', + 'RCX_MERGE_VIA_WIRE_RES', + 'RCX_SDC_FILE', + 'SPEF_WIRE_MODEL', + 'SPEF_EDGE_CAP_FACTOR', + 'MAGIC_PAD', + 'MAGIC_ZEROIZE_ORIGIN', + 'MAGIC_GENERATE_GDS', + 'MAGIC_GENERATE_LEF', + 'MAGIC_GENERATE_MAGLEF', + 'MAGIC_WRITE_FULL_LEF', + 'MAGIC_DRC_USE_GDS', + 'MAGIC_EXT_USE_GDS', + 'MAGIC_INCLUDE_GDS_POINTERS', + 'MAGIC_DISABLE_HIER_GDS', + 'MAGIC_DEF_NO_BLOCKAGES', + 'MAGIC_DEF_LABELS', + 'MAGIC_GDS_ALLOW_ABSTRACT', + 'MAGIC_GDS_POLYGON_SUBCELLS', + 'LVS_INSERT_POWER_PINS', + 'LVS_CONNECT_BY_LABEL', + 'YOSYS_REWRITE_VERILOG', + 'RUN_DRT', + 'RUN_LVS', + 'RUN_MAGIC', + 'RUN_MAGIC_DRC', + 'RUN_KLAYOUT', + 'RUN_KLAYOUT_DRC', + 'RUN_KLAYOUT_XOR', + 'RUN_SPEF_EXTRACTION', + 'RUN_CVC', + 'RUN_IRDROP_REPORT', + 'RUN_TAP_DECAP_INSERTION', + 'RUN_FILL_INSERTION', + 'KLAYOUT_DRC_KLAYOUT_GDS', + 'GENERATE_FINAL_SUMMARY_REPORT', + 'LEC_ENABLE', + 'USE_GPIO_PADS', + 'PRIMARY_SIGNOFF_TOOL', + 'KLAYOUT_XOR_GDS', + 'KLAYOUT_XOR_XML', + 'TAKE_LAYOUT_SCROT', + 'KLAYOUT_XOR_THREADS', + 'DIODE_INSERTION_STRATEGY', + 'USE_ARC_ANTENNA_CHECK', + 'TAP_DECAP_INSERTION', + 'MAGIC_CONVERT_DRC_TO_RDB', + 'TEST_MISMATCHES', + 'QUIT_ON_MISMATCHES', + 'KLAYOUT_XOR_GDS', + 'KLAYOUT_XOR_XML', + 'CHECK_UNMAPPED_CELLS', + 'CHECK_ASSIGN_STATEMENTS', + 'QUIT_ON_TR_DRC', + 'QUIT_ON_LONG_WIRE', + 'QUIT_ON_MAGIC_DRC', + 'QUIT_ON_ILLEGAL_OVERLAPS', + 'QUIT_ON_LVS_ERROR', +] + + +def check_sub(key: Any, value: Any): + if not isinstance(key, str): + raise TypeError('key must be a string') + if key.startswith(('pdk::', 'scl::')): + if not isinstance(value, dict): + raise TypeError('value must be a dictionary in conditional execution') + for k, v in value.items(): + check_sub(k, v) + elif key not in valid_vars: + raise ValueError(f'invalid variable name: {key}') + + +def check_data(json_data: Any): + if not isinstance(json_data, dict): + raise TypeError('configuration must be a dictionary') + for k, v in json_data.items(): + check_sub(k, v) + + +def check_file(json_path: str | PathLike = 'config.json'): + with open(json_path) as f: + check_data(json.load(f)) + + +if __name__ == '__main__': + check_file() diff --git a/veriloggen/asic/macro.py b/veriloggen/asic/macro.py new file mode 100644 index 00000000..b0f5763c --- /dev/null +++ b/veriloggen/asic/macro.py @@ -0,0 +1,230 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING +if TYPE_CHECKING: + from typing import Literal + +import json +from warnings import warn +from pathlib import Path +import re +from rectpack import newPacker, SORT_NONE +from pyverilog.vparser import ast as vast +from pyverilog.vparser.parser import VerilogParser + + +def place_macros( + macro_shapes: list[tuple[int | float, int | float]], + die_shape: tuple[int | float, int | float], + halo_min: int | float = 0, + halo_max: int | float | None = None, + threshold: int | float = 0.1, + allow_rotation: bool = False, +) -> list[tuple[float, float, bool]]: + if halo_max is None: + halo_max = max(die_shape) + halo_lb = halo_min + halo_ub = halo_max + while halo_ub - halo_lb > threshold: + halo = (halo_lb + halo_ub) / 2 + packer = newPacker(sort_algo=SORT_NONE, rotation=allow_rotation) + for width, height 
in macro_shapes: + packer.add_rect(width + 2*halo, height + 2*halo) + packer.add_bin(*die_shape) + packer.pack() + if len(packer.rect_list()) == len(macro_shapes): + halo_lb = halo + else: + halo_ub = halo + halo = halo_lb + packer = newPacker(sort_algo=SORT_NONE, rotation=allow_rotation) + for width, height in macro_shapes: + packer.add_rect(width + 2*halo, height + 2*halo) + packer.add_bin(*die_shape) + packer.pack() + result: list[tuple[float, float, bool]] = [] + for (width, height), (_, x, y, w, h, _) in zip(macro_shapes, packer.rect_list()): + if (allow_rotation and + (w - height - 2*halo)**2 + (h - width - 2*halo)**2 < (w - width - 2*halo)**2 + (h - height - 2*halo)**2): + result.append((x + halo, y + halo, True)) + else: + result.append((x + halo, y + halo, False)) + return result + + +def extract_macros_sub( + node: vast.ModuleDef, + prefix: list[str], + macros: list[tuple[str, str]], + modules: dict[str, vast.ModuleDef], +) -> None: + for item in node.items: + if isinstance(item, vast.InstanceList): + for inst in item.instances: + if not isinstance(inst, vast.Instance): + raise RuntimeError + if inst.module in modules: + extract_macros_sub(modules[inst.module], + prefix + [inst.name], macros, modules) + else: + macros.append((inst.module, '.'.join(prefix + [inst.name]))) + + +def extract_macros(verilog: str, top: str) -> list[tuple[str, str]]: + modules: dict[str, vast.ModuleDef] = {} + parser = VerilogParser() + src = parser.parse(verilog) + if not isinstance(src, vast.Source): + raise RuntimeError + desc = src.description + if not isinstance(desc, vast.Description): + raise RuntimeError + defs = desc.definitions + if not isinstance(defs, tuple): + raise RuntimeError + for d in defs: + if isinstance(d, vast.ModuleDef): + modules[d.name] = d + + macros: list[tuple[str, str]] = [] + extract_macros_sub(modules[top], [], macros, modules) + return macros + + +def generate_configs( + src_local_path: str, + top: str, + clk: str, + clock_period: int | float, + die_shape: tuple[int | float, int | float], + pdk: Literal['sky130', 'gf180mcu', 'sky130A', 'sky130B', 'gf180mcuA', 'gf180mcuB', 'gf180mcuC'], + pdk_root: str | None = None, + macro_local_path: str | None = None, + macro_docker_path: str | None = None, + vdd: str | None = None, + gnd: str | None = None, +): + if pdk not in ['sky130', 'gf180mcu', 'sky130A', 'sky130B', 'gf180mcuA', 'gf180mcuB', 'gf180mcuC']: + raise ValueError(f'Invalid PDK: {pdk}') + if pdk == 'sky130': + # sky130A is default for sky130 + pdk += 'A' + if pdk == 'gf180mcu': + # gf180mcuC is default for gf180mcu + pdk += 'C' + + if (vdd is None) != (gnd is None): + raise TypeError('Specify both or neither of `vdd` and `gnd`') + if vdd is None and gnd is None: + if pdk.startswith('sky130'): + vdd = 'vccd1' + gnd = 'vssd1' + else: + vdd = 'VDD' + gnd = 'VSS' + + if macro_local_path is None and macro_docker_path is None: + if pdk_root is None: + raise TypeError('`pdk_root` must be specified if `macro_local_path` and `macro_docker_path` are not specified') + macro_local_path = str(Path(pdk_root) / pdk / 'libs.ref') + macro_docker_path = '/'.join(['/openlane/pdks', pdk, 'libs.ref']) + elif macro_docker_path is None: + macro_docker_path = macro_local_path + elif macro_local_path is None: + raise TypeError('It is invalid to specify `macro_docker_path` and not specify `macro_local_path`') + + with open(src_local_path) as f: + src = f.read() + + macros = extract_macros(src, top) + macro_modules = list({mod for mod, _ in macros}) + macro_insts = [inst for _, inst in macros] + 
macro_shapes: dict[str, tuple[int | float, int | float]] = {} + + config_json = {} + + config_json['PDK'] = pdk + config_json['DESIGN_NAME'] = top + config_json['VERILOG_FILES'] = 'dir::src/*.v' + config_json['CLOCK_PORT'] = clk + config_json['CLOCK_PERIOD'] = clock_period + config_json['DESIGN_IS_CORE'] = True + + config_json['FP_SIZING'] = 'absolute' + config_json['DIE_AREA'] = ' '.join([str(num) for num in [0, 0] + list(die_shape)]) + config_json['PL_TARGET_DENSITY'] = 0.5 + + config_json['VDD_NETS'] = vdd + config_json['GND_NETS'] = gnd + config_json['FP_PDN_MACRO_HOOKS'] = ', '.join([' '.join([inst, vdd, gnd, vdd, gnd]) for inst in macro_insts]) + + config_json['MACRO_PLACEMENT_CFG'] = 'dir::macro_placement.cfg' + + pdk_lefs = list(Path(macro_local_path).glob('**/*.lef')) + pdk_gdss = list(Path(macro_local_path).glob('**/*.gds')) + pdk_libs = list(Path(macro_local_path).glob('**/*.lib')) + extra_lefs: list[str] = [] + extra_gds_files: list[str] = [] + extra_libs: list[str] = [] + for mod in macro_modules: + lefs: list[Path] = [] + for lef in pdk_lefs: + if lef.name.startswith(mod): + lefs.append(lef) + if not lefs: + raise RuntimeError('No LEF file found') + if len(lefs) > 1: + warn('\n'.join(['More than one LEF files found:'] + [str(lef) for lef in lefs]), RuntimeWarning) + extra_lefs.append(str(Path(macro_docker_path) / lefs[0].relative_to(macro_local_path))) + with lefs[0].open() as f: + for l in f: + m = re.search(r'SIZE ([0-9\.]+) BY ([0-9\.]+)', l) + if m: + try: + x = int(m[1]) + except ValueError: + x = float(m[1]) + try: + y = int(m[2]) + except ValueError: + y = float(m[2]) + break + else: + raise RuntimeError(f'The found LEF file ({lefs[0]}) does not contain size information') + macro_shapes[mod] = (x, y) + gdss: list[Path] = [] + for gds in pdk_gdss: + if gds.name.startswith(mod): + gdss.append(gds) + if not gdss: + raise RuntimeError('No GDS file found') + if len(gdss) > 1: + warn('\n'.join(['More than one GDS files found:'] + [str(gds) for gds in gdss]), RuntimeWarning) + extra_gds_files.append(str(Path(macro_docker_path) / gdss[0].relative_to(macro_local_path))) + libs: list[Path] = [] + for lib in pdk_libs: + if lib.name.startswith(mod): + libs.append(lib) + if not libs: + raise RuntimeError('No LIB file found') + if len(libs) > 1: + warn('\n'.join(['More than one LIB files found:'] + [str(lib) for lib in libs]), RuntimeWarning) + extra_libs.append(str(Path(macro_docker_path) / libs[0].relative_to(macro_local_path))) + config_json['EXTRA_LEFS'] = ' '.join(extra_lefs) + config_json['EXTRA_GDS_FILES'] = ' '.join(extra_gds_files) + config_json['EXTRA_LIBS'] = ' '.join(extra_libs) + + config_json['RUN_KLAYOUT_XOR'] = False + config_json['MAGIC_DRC_USE_GDS'] = False + config_json['QUIT_ON_MAGIC_DRC'] = False + + with open('config.json', 'w') as f: + json.dump(config_json, f, indent=4) + + macro_placement_cfg: list[str] = [] + macro_placement = place_macros([macro_shapes[mod] for mod, _ in macros], + die_shape, allow_rotation=True) + for (_, inst), (x, y, r) in zip(macros, macro_placement): + macro_placement_cfg.append(' '.join([inst, str(x), str(y), 'R90' if r else 'R0'])) + with open('macro_placement.cfg', 'w') as f: + f.write('\n'.join(macro_placement_cfg)) diff --git a/veriloggen/asic/simulation.py b/veriloggen/asic/simulation.py new file mode 100644 index 00000000..c70588ec --- /dev/null +++ b/veriloggen/asic/simulation.py @@ -0,0 +1,43 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING +if TYPE_CHECKING: + from typing import Literal + from 
veriloggen.core.module import Module + +from pathlib import Path +from veriloggen.simulation.simulation import run_iverilog + + +def asic_sim( + test_bench: Module, + pdk: Literal['sky130', 'gf180mcu', 'sky130A', 'sky130B', 'gf180mcuA', 'gf180mcuB', 'gf180mcuC'] | None = None, + pdk_root: str | None = None, + macro_model_path: str | list[str] | None = None, +): + if pdk is not None: + if pdk not in ['sky130', 'gf180mcu', 'sky130A', 'sky130B', 'gf180mcuA', 'gf180mcuB', 'gf180mcuC']: + raise ValueError(f'Invalid PDK: {pdk}') + if pdk == 'sky130': + # sky130A is default for sky130 + pdk += 'A' + if pdk == 'gf180mcu': + # gf180mcuC is default for gf180mcu + pdk += 'C' + + if macro_model_path is None: + if (pdk is None) != (pdk_root is None): + raise TypeError('Specify both or neither of `pdk` and `pdk_root`') + if pdk is not None and pdk_root is not None: + if pdk.startswith('sky130'): + macro_model_path = str(Path(pdk_root) / pdk / 'libs.ref' / 'sky130_sram_macros' / 'verilog') + if pdk.startswith('gf180mcu'): + macro_model_path = str(Path(pdk_root) / pdk / 'libs.ref' / 'gf180mcu_fd_ip_sram' / 'verilog') + + if isinstance(macro_model_path, str): + macro_model_path = [macro_model_path] + + if macro_model_path is None: + return run_iverilog([test_bench], timescale=('1ns', '1ps')) + else: + return run_iverilog([test_bench], libdir=macro_model_path, timescale=('1ns', '1ps')) diff --git a/veriloggen/asic/sram.py b/veriloggen/asic/sram.py new file mode 100644 index 00000000..1770279d --- /dev/null +++ b/veriloggen/asic/sram.py @@ -0,0 +1,360 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING +if TYPE_CHECKING: + from typing import Literal + +from functools import partial +from veriloggen.core.module import Module +from veriloggen.core import vtypes +from veriloggen.seq.seq import Seq, make_condition +from veriloggen.types import util +from veriloggen.thread.ram import RAM + + +def zero(width: int): + if width <= 0: + raise ValueError + return vtypes.Int(0, width) + + +def pad(data, width): + if data.get_width() > width: + raise ValueError + if data.get_width() == width: + return data + else: + return vtypes.Cat(vtypes.Int(0, width - data.get_width()), data) + + +def make_mux_sub(data, select, index): + if index == 0: + return vtypes.Cond(select[0], data[1], data[0]) + return vtypes.Cond(select[index], + make_mux_sub(data[len(data) // 2:], select, index - 1), + make_mux_sub(data[:len(data) // 2], select, index - 1)) + + +def make_mux(data_list, selector): + n = len(data_list) + if n <= 1 or n & (n - 1) != 0: + raise ValueError + k = n.bit_length() - 1 + if selector.get_width() != k: + raise ValueError + return make_mux_sub(data_list, selector, k - 1) + + +def make_decoder(data): + k = data.get_width() + n = 2**k + return vtypes.Cat(*reversed([data == i for i in range(n)])) + + +def make_sky130_sram( + name: str, + datawidth: int, + addrwidth: int, +) -> Module: + m = Module(name) + + clk = m.Input('clk') # clock + ce0 = m.Input('ce0') # chip enable for port 0 (active high) + cen0 = m.Wire('cen0') + cen0.assign(vtypes.Not(ce0)) + ce1 = m.Input('ce1') # chip enable for port 1 (active high) + cen1 = m.Wire('cen1') + cen1.assign(vtypes.Not(ce1)) + we = m.Input('we') # write enable for port 0 (active high) + wen = m.Wire('wen') + wen.assign(vtypes.Not(we)) + addr0 = m.Input('addr0', addrwidth) # address for port 0 + addr1 = m.Input('addr1', addrwidth) # address for port 1 + din = m.Input('din', datawidth) # input data = written data for port 0 + dout0 = m.Output('dout0', 
datawidth) # output data = read data for port 0 + dout1 = m.Output('dout1', datawidth) # output data = read data for port 1 + + if datawidth <= 8: + if addrwidth <= 11: + if addrwidth == 11: + macro_name = 'sky130_sram_2kbyte_1rw1r_32x512_8' + else: + macro_name = 'sky130_sram_1kbyte_1rw1r_32x256_8' + if addrwidth < 2: + raise ValueError + mux_sel0 = m.Wire('mux_sel0', 2) + mux_sel0.assign(addr0[:2]) + mux_sel1 = m.Wire('mux_sel1', 2) + mux_sel1.assign(addr1[:2]) + sram_addr0 = addr0[2:] + sram_addr1 = addr1[2:] + if addrwidth < 10: + sram_addr0 = vtypes.Cat(zero(10 - addrwidth), sram_addr0) + sram_addr1 = vtypes.Cat(zero(10 - addrwidth), sram_addr1) + raw_dout0 = m.Wire('raw_dout0', 32) + raw_dout1 = m.Wire('raw_dout1', 32) + dout_list0 = [m.Wire(f'dout0_{i}', datawidth) for i in range(4)] + dout_list1 = [m.Wire(f'dout1_{i}', datawidth) for i in range(4)] + for i in range(4): + dout_list0[i].assign(raw_dout0[8*i:8*(i+1)]) + dout_list1[i].assign(raw_dout1[8*i:8*(i+1)]) + dout0.assign(make_mux(dout_list0, mux_sel0)) + dout1.assign(make_mux(dout_list1, mux_sel1)) + ports = [ + ('clk0', clk), ('clk1', clk), ('csb0', cen0), ('csb1', cen1), + ('web0', wen), ('wmask0', make_decoder(mux_sel0)), + ('addr0', sram_addr0), ('addr1', sram_addr1), + ('din0', pad(din, 8).repeat(4)), ('dout0', raw_dout0), + ('dout1', raw_dout1) + ] + m.Instance(macro_name, 'ram', ports=ports, workaround=True) + else: + raise ValueError('not yet implemented') + elif datawidth <= 16: + if addrwidth <= 10: + if addrwidth == 10: + macro_name = 'sky130_sram_2kbyte_1rw1r_32x512_8' + else: + macro_name = 'sky130_sram_1kbyte_1rw1r_32x256_8' + if addrwidth < 1: + raise ValueError + mux_sel0 = m.Wire('mux_sel0', 1) + mux_sel0.assign(addr0[0]) + mux_sel1 = m.Wire('mux_sel1', 1) + mux_sel1.assign(addr1[0]) + sram_addr0 = addr0[1:] + sram_addr1 = addr1[1:] + if addrwidth < 9: + sram_addr0 = vtypes.Cat(zero(9 - addrwidth), sram_addr0) + sram_addr1 = vtypes.Cat(zero(9 - addrwidth), sram_addr1) + raw_dout0 = m.Wire('raw_dout0', 32) + raw_dout1 = m.Wire('raw_dout1', 32) + dout_list0 = [m.Wire(f'dout0_{i}', datawidth) for i in range(2)] + dout_list1 = [m.Wire(f'dout1_{i}', datawidth) for i in range(2)] + for i in range(2): + dout_list0[i].assign(raw_dout0[16*i:16*(i+1)]) + dout_list1[i].assign(raw_dout1[16*i:16*(i+1)]) + dout0.assign(make_mux(dout_list0, mux_sel0)) + dout1.assign(make_mux(dout_list1, mux_sel1)) + byte_write_enable = vtypes.Cat( + mux_sel0.repeat(2), vtypes.Not(mux_sel0).repeat(2)) + ports = [ + ('clk0', clk), ('clk1', clk), ('csb0', cen0), ('csb1', cen1), + ('web0', wen), ('wmask0', byte_write_enable), + ('addr0', sram_addr0), ('addr1', sram_addr1), + ('din0', pad(din, 16).repeat(2)), ('dout0', raw_dout0), + ('dout1', raw_dout1) + ] + m.Instance(macro_name, 'ram', ports=ports, workaround=True) + else: + raise ValueError('not yet implemented') + elif datawidth <= 32: + if addrwidth <= 9: + if addrwidth == 9: + macro_name = 'sky130_sram_2kbyte_1rw1r_32x512_8' + pad_addr = partial(pad, width=9) + else: + macro_name = 'sky130_sram_1kbyte_1rw1r_32x256_8' + pad_addr = partial(pad, width=8) + raw_dout0 = m.Wire('raw_dout0', 32) + raw_dout1 = m.Wire('raw_dout1', 32) + dout0.assign(raw_dout0[:datawidth]) + dout1.assign(raw_dout1[:datawidth]) + # NOTE: is `wmask0` really active high?
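+ # All four bits of wmask0 are driven high here because the full 32-bit word is always written; this assumes wmask0 is an active-high per-byte write mask (the note above questions this), consistent with its use in the 8-bit and 16-bit cases above.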
+ ports = [ + ('clk0', clk), ('clk1', clk), ('csb0', cen0), ('csb1', cen1), + ('web0', wen), ('wmask0', vtypes.Int(1, 1).repeat(4)), + ('addr0', pad_addr(addr0)), ('addr1', pad_addr(addr1)), + ('din0', pad(din, 32)), ('dout0', raw_dout0), + ('dout1', raw_dout1) + ] + m.Instance(macro_name, 'ram', ports=ports, workaround=True) + else: + raise ValueError('not yet implemented') + else: + raise ValueError('not yet implemented') + + return m + + +def make_gf180mcu_sram( + name: str, + datawidth: int, + addrwidth: int, +) -> Module: + if datawidth <= 0 or addrwidth <= 0: + raise ValueError + + m = Module(name) + + clk = m.Input('clk') + ce = m.Input('ce') # active high + cen = m.Wire('cen') # active low + cen.assign(vtypes.Not(ce)) + we = m.Input('we') # active high + wen = m.Wire('wen') # active low + wen.assign(vtypes.Not(we)) + + addr = m.Input('addr', addrwidth) + din = m.Input('din', datawidth) + dout = m.Output('dout', datawidth) + + if addrwidth <= 9: + if addrwidth == 9: + macro_name = 'gf180mcu_fd_ip_sram__sram512x8m8wm1' + elif addrwidth == 8: + macro_name = 'gf180mcu_fd_ip_sram__sram256x8m8wm1' + elif addrwidth == 7: + macro_name = 'gf180mcu_fd_ip_sram__sram128x8m8wm1' + else: + macro_name = 'gf180mcu_fd_ip_sram__sram64x8m8wm1' + if addrwidth < 6: + addr = vtypes.Cat(vtypes.Int(0, 6 - addrwidth), addr) + for j in range(datawidth // 8): + m.Instance(macro_name, f'ram_{j}', + ports=[('CLK', clk), ('CEN', cen), ('GWEN', wen), + ('WEN', zero(8)), ('A', addr), + ('D', din[8*j:8*(j+1)]), + ('Q', dout[8*j:8*(j+1)])], + workaround=True) + if datawidth % 8 != 0: + tmp_in = m.Wire('tmp_in', 8) + tmp_in.assign(vtypes.Cat(vtypes.Int(0, 8 - datawidth%8), + din[datawidth // 8 * 8:])) + tmp_out = m.Wire('tmp_out', 8) + dout[datawidth // 8 * 8:].assign(tmp_out[:datawidth % 8]) + m.Instance(macro_name, f'ram_{datawidth // 8}', + ports=[('CLK', clk), ('CEN', cen), ('GWEN', wen), + ('WEN', zero(8)), ('A', addr), + ('D', tmp_in), ('Q', tmp_out)], + workaround=True) + else: + sram_addr = m.Wire('sram_addr', 9) + sram_addr.assign(addr[:9]) + mux_sel = m.Wire('mux_sel', addrwidth - 9) + mux_sel.assign(addr[9:]) + dout_list = [m.Wire(f'dout_{i}', datawidth) + for i in range(2**(addrwidth - 9))] + dout.assign(make_mux(dout_list, mux_sel)) + if datawidth % 8 != 0: + tmp_in = m.Wire('tmp_in', 8) + tmp_in.assign(vtypes.Cat(vtypes.Int(0, 8 - datawidth%8), + din[datawidth // 8 * 8:])) + for i in range(2**(addrwidth - 9)): + seln = m.Wire(f'seln_{i}') + seln.assign(mux_sel != i) + for j in range(datawidth // 8): + m.Instance('gf180mcu_fd_ip_sram__sram512x8m8wm1', + f'ram_{i}_{j}', + ports=[('CLK', clk), ('CEN', vtypes.Ors(cen, seln)), + ('GWEN', vtypes.Ors(wen, seln)), + ('WEN', zero(8)), ('A', sram_addr), + ('D', din[8*j:8*(j+1)]), + ('Q', dout_list[i][8*j:8*(j+1)])], + workaround=True) + if datawidth % 8 != 0: + tmp_out = m.Wire(f'tmp_out_{i}', 8) + dout_list[i][datawidth // 8 * 8:].assign(tmp_out[:datawidth % 8]) + m.Instance('gf180mcu_fd_ip_sram__sram512x8m8wm1', + f'ram_{i}_{datawidth // 8}', + ports=[('CLK', clk), ('CEN', vtypes.Ors(cen, seln)), + ('GWEN', vtypes.Ors(wen, seln)), + ('WEN', zero(8)), ('A', sram_addr), + ('D', tmp_in), ('Q', tmp_out)], + workaround=True) + + return m + + +class ASICSRAM(RAM): + def __init__( + self, + m: Module, + name: str, + clk: vtypes._Variable, + rst: vtypes._Variable, + datawidth: int, + addrwidth: int, + pdk: Literal['sky130', 'gf180mcu'] = 'sky130', + ): + self.m = m + self.name = name + self.clk = clk + self.rst = rst + self.datawidth = datawidth + self.addrwidth = 
addrwidth + self.pdk = pdk + + self.interfaces: list[dict[str, vtypes.Wire]] = [] + rw_if: dict[str, vtypes.Wire] = {} + rw_if['ce'] = m.Wire(f'{name}_ce0') + rw_if['we'] = m.Wire(f'{name}_we0') + rw_if['addr'] = m.Wire(f'{name}_addr0', addrwidth) + rw_if['din'] = m.Wire(f'{name}_din0', datawidth) + rw_if['dout'] = m.Wire(f'{name}_dout0', datawidth) + self.interfaces.append(rw_if) + if pdk == 'sky130': + r_if: dict[str, vtypes.Wire] = {} + r_if['ce'] = m.Wire(f'{name}_ce1') + r_if['addr'] = m.Wire(f'{name}_addr1', addrwidth) + r_if['dout'] = m.Wire(f'{name}_dout1', datawidth) + self.interfaces.append(r_if) + + if pdk == 'sky130': + mod = make_sky130_sram(f'{name}_mod', datawidth, addrwidth) + ports = [ + ('clk', clk), ('ce0', rw_if['ce']), ('ce1', r_if['ce']), + ('we', rw_if['we']), ('addr0', rw_if['addr']), + ('addr1', r_if['addr']), ('din', rw_if['din']), + ('dout0', rw_if['dout']), ('dout1', r_if['dout']) + ] + elif pdk == 'gf180mcu': + mod = make_gf180mcu_sram(f'{name}_mod', datawidth, addrwidth) + ports = [ + ('clk', clk), ('ce', rw_if['ce']), ('we', rw_if['we']), + ('addr', rw_if['addr']), ('din', rw_if['din']), + ('dout', rw_if['dout']) + ] + else: + raise ValueError(f'invalid PDK: {pdk}') + m.Instance(mod, name + '_inst', ports=ports) + + self.seq = Seq(m, name + '_seq', clk, rst) + + self.mutex = None + + def read_rtl(self, addr, port=0, cond=None): + """ + @return data, valid + """ + + cond = make_condition(cond) + + if cond is not None: + enable = cond + else: + enable = vtypes.Int(1, 1) + + util.add_mux(self.interfaces[port]['addr'], enable, addr) + util.add_enable_cond(self.interfaces[port]['ce'], enable, vtypes.Int(1, 1)) + + rdata = self.interfaces[port]['dout'] + rvalid = self.seq.Prev(enable, 1) + + return rdata, rvalid + + def write_rtl(self, addr, wdata, port=0, cond=None): + """ + @return None + """ + cond = make_condition(cond) + + if cond is not None: + enable = cond + else: + enable = vtypes.Int(1, 1) + + util.add_mux(self.interfaces[port]['addr'], enable, addr) + util.add_mux(self.interfaces[port]['din'], enable, wdata) + util.add_enable_cond(self.interfaces[port]['we'], enable, vtypes.Int(1, 1)) + util.add_enable_cond(self.interfaces[port]['ce'], enable, vtypes.Int(1, 1)) diff --git a/veriloggen/asic/util.py b/veriloggen/asic/util.py new file mode 100644 index 00000000..aa16e6e6 --- /dev/null +++ b/veriloggen/asic/util.py @@ -0,0 +1,12 @@ +from __future__ import annotations +from typing import TYPE_CHECKING +if TYPE_CHECKING: + from typing import SupportsInt + from collections.abc import Sequence + from ..core.module import Module + + +def make_arr(m: Module, name: str, vals: Sequence[SupportsInt], width=32, signed=True): + arr = m.Reg(name, width, len(vals), signed) + m.Initial(*[arr[i](int(x)) for i, x in enumerate(vals)]) + return arr diff --git a/veriloggen/core/module.py b/veriloggen/core/module.py index 7f154bf2..fbe6ee1c 100644 --- a/veriloggen/core/module.py +++ b/veriloggen/core/module.py @@ -555,7 +555,7 @@ def GenerateIf(self, cond, scope=None): return t # ------------------------------------------------------------------------- - def Instance(self, module, instname, params=None, ports=None): + def Instance(self, module, instname, params=None, ports=None, workaround=False): if isinstance(module, str): module = StubModule(module) @@ -567,6 +567,9 @@ def Instance(self, module, instname, params=None, ports=None): self.instance[instname] = t self.items.append(t) + if workaround: + return t + mod = self.find_module(module.name) if mod is None: 
self.submodule[module.name] = module diff --git a/veriloggen/simulation/simulation.py b/veriloggen/simulation/simulation.py index 99e41f6c..0565fece 100644 --- a/veriloggen/simulation/simulation.py +++ b/veriloggen/simulation/simulation.py @@ -125,7 +125,7 @@ def _to_code(objs): def run_iverilog(objs, display=False, top=None, outputfile=None, - include=None, define=None, libdir=None): + include=None, define=None, libdir=None, timescale=None): if not isinstance(objs, (tuple, list)): objs = [objs] @@ -178,6 +178,11 @@ def run_iverilog(objs, display=False, top=None, outputfile=None, code = _to_code(objs) + if timescale is not None: + if isinstance(timescale, tuple): + timescale = timescale[0] + '/' + timescale[1] + code = '`timescale ' + timescale + '\n\n' + code + if os.name == 'nt': fd, path = tempfile.mkstemp() os.close(fd)
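Note: the new place_macros() helper in veriloggen/asic/macro.py is only exercised indirectly, through generate_configs(). Below is a minimal standalone sketch of how it could be called; the macro footprints are made-up placeholder values, and rectpack (added to install_requires above) must be installed.

from veriloggen.asic.macro import place_macros

# two hypothetical macro footprints (width, height) on a 1000 x 1000 die,
# the same die_shape used by the example designs in this diff
macro_shapes = [(480.0, 400.0), (480.0, 400.0)]
die_shape = (1000, 1000)

# place_macros() binary-searches for the largest halo (keep-out margin around each
# macro) at which every macro still fits on the die, then returns one
# (x, y, rotated) tuple per entry of macro_shapes
placement = place_macros(macro_shapes, die_shape, allow_rotation=True)

for (w, h), (x, y, rotated) in zip(macro_shapes, placement):
    print(f'{w} x {h} macro placed at ({x:.2f}, {y:.2f}), rotated={rotated}')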