Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -129,6 +129,9 @@ AddModel(Pow_F32_R2)
AddModel(Pow_F32_R2_3)
AddModel(Pow_F32_R4)
AddModel(PReLU_F32_R4)
# AddModel(Range_F32_R0_1) --> Doesn't support dynamic shape output
# AddModel(Range_F32_R0_2) --> Doesn't support dynamic shape output
# AddModel(Range_F32_R0_3) --> Doesn't support dynamic shape output
Comment on lines +132 to +134
Copy link
Contributor Author

@shs-park shs-park Feb 12, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Doesn't support dynamic shape output

related comment:

AddModel(Reciprocal_F32_R4)
AddModel(ReduceMax_F32_R2)
AddModel(ReduceMax_F32_R2_d0)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -137,6 +137,10 @@ AddModel(QuantizeLinear_F32_R3_ui8)
AddModel(QuantizeLinear_F32_R3_ui8_fq)
AddModel(QuantizeLinear_F32_R4_i16_cw)
AddModel(QuantizeLinear_F32_R4_ui8_cw)
# TODO activate after Range op support
# AddModel(Range_F32_R0_1)
# AddModel(Range_F32_R0_2)
# AddModel(Range_F32_R0_3)
AddModel(Reciprocal_F32_R4)
AddModel(ReduceMax_F32_R2)
AddModel(ReduceMax_F32_R2_d0)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -137,6 +137,9 @@ AddModel(PReLU_F32_R4)
# AddModel(QuantizeLinear_F32_R3_ui8_fq)
# AddModel(QuantizeLinear_F32_R4_i16_cw)
# AddModel(QuantizeLinear_F32_R4_ui8_cw)
# AddModel(Range_F32_R0_1) --> Doesn't support dynamic shape output
# AddModel(Range_F32_R0_2) --> Doesn't support dynamic shape output
# AddModel(Range_F32_R0_3) --> Doesn't support dynamic shape output
AddModel(Reciprocal_F32_R4)
AddModel(ReduceMax_F32_R2)
AddModel(ReduceMax_F32_R2_d0)
Expand Down
20 changes: 20 additions & 0 deletions circle-mlir/models/unit/Range_F32_R0_1/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
import torch


# Generate Range operator with Float32, scalar
class net_Range(torch.nn.Module):
def __init__(self):
super().__init__()

def forward(self, input):
limit = input
return torch.arange(0, limit, 1, dtype=torch.float32)

def onnx_opset_version(self):
return 11


_model_ = net_Range()

# produce float32 scalar with fixed number
_inputs_ = torch.tensor(10, dtype=torch.float32)
19 changes: 19 additions & 0 deletions circle-mlir/models/unit/Range_F32_R0_2/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
import torch


# Generate Range operator with Float32, scalar
class net_Range(torch.nn.Module):
def __init__(self):
super().__init__()

def forward(self, limit, delta):
return torch.arange(0, limit, delta, dtype=torch.float32)

def onnx_opset_version(self):
return 11


_model_ = net_Range()

# produce float32 scalar with fixed number
_inputs_ = (torch.tensor(10, dtype=torch.float32), torch.tensor(1, dtype=torch.float32))
20 changes: 20 additions & 0 deletions circle-mlir/models/unit/Range_F32_R0_3/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
import torch


# Generate Range operator with Float32, scalar
class net_Range(torch.nn.Module):
def __init__(self):
super().__init__()

def forward(self, start, limit, delta):
return torch.arange(start, limit, delta, dtype=torch.float32)

def onnx_opset_version(self):
return 11


_model_ = net_Range()

# produce float32 scalar with fixed number
_inputs_ = (torch.tensor(0, dtype=torch.float32), torch.tensor(10, dtype=torch.float32),
torch.tensor(1, dtype=torch.float32))