program(1.3)
[buildInfo = dict<string, string>({{"coremlc-component-MIL", "3405.2.1"}, {"coremlc-version", "3404.23.1"}, {"coremltools-component-torch", "2.5.0"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "8.3.0"}})]
{
func main<ios18>(tensor<fp16, [2, 1, 640]> cell_state, tensor<fp16, [1, 1, 640]> embedded_y, tensor<fp16, [2, 1, 640]> hidden_state) {
tensor<int32, [3]> var_10_begin_0 = const()[name = string("op_10_begin_0"), val = tensor<int32, [3]>([0, 0, 0])];
tensor<int32, [3]> var_10_end_0 = const()[name = string("op_10_end_0"), val = tensor<int32, [3]>([1, 1, 640])];
tensor<bool, [3]> var_10_end_mask_0 = const()[name = string("op_10_end_mask_0"), val = tensor<bool, [3]>([false, true, true])];
tensor<bool, [3]> var_10_squeeze_mask_0 = const()[name = string("op_10_squeeze_mask_0"), val = tensor<bool, [3]>([true, false, false])];
tensor<fp16, [1, 640]> var_10_cast_fp16 = slice_by_index(begin = var_10_begin_0, end = var_10_end_0, end_mask = var_10_end_mask_0, squeeze_mask = var_10_squeeze_mask_0, x = hidden_state)[name = string("op_10_cast_fp16")];
tensor<int32, [3]> var_13_begin_0 = const()[name = string("op_13_begin_0"), val = tensor<int32, [3]>([1, 0, 0])];
tensor<int32, [3]> var_13_end_0 = const()[name = string("op_13_end_0"), val = tensor<int32, [3]>([2, 1, 640])];
tensor<bool, [3]> var_13_end_mask_0 = const()[name = string("op_13_end_mask_0"), val = tensor<bool, [3]>([false, true, true])];
tensor<bool, [3]> var_13_squeeze_mask_0 = const()[name = string("op_13_squeeze_mask_0"), val = tensor<bool, [3]>([true, false, false])];
tensor<fp16, [1, 640]> var_13_cast_fp16 = slice_by_index(begin = var_13_begin_0, end = var_13_end_0, end_mask = var_13_end_mask_0, squeeze_mask = var_13_squeeze_mask_0, x = hidden_state)[name = string("op_13_cast_fp16")];
tensor<int32, [3]> var_16_begin_0 = const()[name = string("op_16_begin_0"), val = tensor<int32, [3]>([0, 0, 0])];
tensor<int32, [3]> var_16_end_0 = const()[name = string("op_16_end_0"), val = tensor<int32, [3]>([1, 1, 640])];
tensor<bool, [3]> var_16_end_mask_0 = const()[name = string("op_16_end_mask_0"), val = tensor<bool, [3]>([false, true, true])];
tensor<bool, [3]> var_16_squeeze_mask_0 = const()[name = string("op_16_squeeze_mask_0"), val = tensor<bool, [3]>([true, false, false])];
tensor<fp16, [1, 640]> var_16_cast_fp16 = slice_by_index(begin = var_16_begin_0, end = var_16_end_0, end_mask = var_16_end_mask_0, squeeze_mask = var_16_squeeze_mask_0, x = cell_state)[name = string("op_16_cast_fp16")];
tensor<int32, [3]> var_19_begin_0 = const()[name = string("op_19_begin_0"), val = tensor<int32, [3]>([1, 0, 0])];
tensor<int32, [3]> var_19_end_0 = const()[name = string("op_19_end_0"), val = tensor<int32, [3]>([2, 1, 640])];
tensor<bool, [3]> var_19_end_mask_0 = const()[name = string("op_19_end_mask_0"), val = tensor<bool, [3]>([false, true, true])];
tensor<bool, [3]> var_19_squeeze_mask_0 = const()[name = string("op_19_squeeze_mask_0"), val = tensor<bool, [3]>([true, false, false])];
tensor<fp16, [1, 640]> var_19_cast_fp16 = slice_by_index(begin = var_19_begin_0, end = var_19_end_0, end_mask = var_19_end_mask_0, squeeze_mask = var_19_squeeze_mask_0, x = cell_state)[name = string("op_19_cast_fp16")];
tensor<int32, [1]> hx_1_axes_0 = const()[name = string("hx_1_axes_0"), val = tensor<int32, [1]>([0])];
tensor<fp16, [1, 1, 640]> hx_1_cast_fp16 = expand_dims(axes = hx_1_axes_0, x = var_10_cast_fp16)[name = string("hx_1_cast_fp16")];
tensor<int32, [1]> hx_3_axes_0 = const()[name = string("hx_3_axes_0"), val = tensor<int32, [1]>([0])];
tensor<fp16, [1, 1, 640]> hx_3_cast_fp16 = expand_dims(axes = hx_3_axes_0, x = var_16_cast_fp16)[name = string("hx_3_cast_fp16")];
tensor<int32, [3]> cast_0_batch_first_transpose_perm_0 = const()[name = string("cast_0_batch_first_transpose_perm_0"), val = tensor<int32, [3]>([1, 0, 2])];
tensor<int32, [1]> input_batch_first_lstm_h0_squeeze_axes_0 = const()[name = string("input_batch_first_lstm_h0_squeeze_axes_0"), val = tensor<int32, [1]>([0])];
tensor<fp16, [1, 640]> input_batch_first_lstm_h0_squeeze_cast_fp16 = squeeze(axes = input_batch_first_lstm_h0_squeeze_axes_0, x = hx_1_cast_fp16)[name = string("input_batch_first_lstm_h0_squeeze_cast_fp16")];
tensor<int32, [1]> input_batch_first_lstm_c0_squeeze_axes_0 = const()[name = string("input_batch_first_lstm_c0_squeeze_axes_0"), val = tensor<int32, [1]>([0])];
tensor<fp16, [1, 640]> input_batch_first_lstm_c0_squeeze_cast_fp16 = squeeze(axes = input_batch_first_lstm_c0_squeeze_axes_0, x = hx_3_cast_fp16)[name = string("input_batch_first_lstm_c0_squeeze_cast_fp16")];
string input_batch_first_direction_0 = const()[name = string("input_batch_first_direction_0"), val = string("forward")];
bool input_batch_first_output_sequence_0 = const()[name = string("input_batch_first_output_sequence_0"), val = bool(true)];
string input_batch_first_recurrent_activation_0 = const()[name = string("input_batch_first_recurrent_activation_0"), val = string("sigmoid")];
string input_batch_first_cell_activation_0 = const()[name = string("input_batch_first_cell_activation_0"), val = string("tanh")];
string input_batch_first_activation_0 = const()[name = string("input_batch_first_activation_0"), val = string("tanh")];
tensor<fp16, [2560, 640]> concat_1_to_fp16 = const()[name = string("concat_1_to_fp16"), val = tensor<fp16, [2560, 640]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(64)))];
tensor<fp16, [2560, 640]> concat_2_to_fp16 = const()[name = string("concat_2_to_fp16"), val = tensor<fp16, [2560, 640]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(3276928)))];
tensor<fp16, [2560]> concat_0_to_fp16 = const()[name = string("concat_0_to_fp16"), val = tensor<fp16, [2560]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(6553792)))];
tensor<fp16, [1, 1, 640]> cast_0_batch_first_transpose_cast_fp16 = transpose(perm = cast_0_batch_first_transpose_perm_0, x = embedded_y)[name = string("transpose_1")];
tensor<fp16, [1, 1, 640]> input_batch_first_cast_fp16_0, tensor<fp16, [1, 640]> input_batch_first_cast_fp16_1, tensor<fp16, [1, 640]> input_batch_first_cast_fp16_2 = lstm(activation = input_batch_first_activation_0, bias = concat_0_to_fp16, cell_activation = input_batch_first_cell_activation_0, direction = input_batch_first_direction_0, initial_c = input_batch_first_lstm_c0_squeeze_cast_fp16, initial_h = input_batch_first_lstm_h0_squeeze_cast_fp16, output_sequence = input_batch_first_output_sequence_0, recurrent_activation = input_batch_first_recurrent_activation_0, weight_hh = concat_2_to_fp16, weight_ih = concat_1_to_fp16, x = cast_0_batch_first_transpose_cast_fp16)[name = string("input_batch_first_cast_fp16")];
tensor<int32, [1]> layer_h_1_axes_0 = const()[name = string("layer_h_1_axes_0"), val = tensor<int32, [1]>([0])];
tensor<fp16, [1, 1, 640]> layer_h_1_cast_fp16 = expand_dims(axes = layer_h_1_axes_0, x = input_batch_first_cast_fp16_1)[name = string("layer_h_1_cast_fp16")];
tensor<int32, [1]> layer_c_1_axes_0 = const()[name = string("layer_c_1_axes_0"), val = tensor<int32, [1]>([0])];
tensor<fp16, [1, 1, 640]> layer_c_1_cast_fp16 = expand_dims(axes = layer_c_1_axes_0, x = input_batch_first_cast_fp16_2)[name = string("layer_c_1_cast_fp16")];
tensor<int32, [1]> var_42_axes_0 = const()[name = string("op_42_axes_0"), val = tensor<int32, [1]>([0])];
tensor<fp16, [1, 640]> var_42_cast_fp16 = squeeze(axes = var_42_axes_0, x = layer_h_1_cast_fp16)[name = string("op_42_cast_fp16")];
tensor<int32, [1]> var_44_axes_0 = const()[name = string("op_44_axes_0"), val = tensor<int32, [1]>([0])];
tensor<fp16, [1, 640]> var_44_cast_fp16 = squeeze(axes = var_44_axes_0, x = layer_c_1_cast_fp16)[name = string("op_44_cast_fp16")];
tensor<int32, [1]> hx_5_axes_0 = const()[name = string("hx_5_axes_0"), val = tensor<int32, [1]>([0])];
tensor<fp16, [1, 1, 640]> hx_5_cast_fp16 = expand_dims(axes = hx_5_axes_0, x = var_13_cast_fp16)[name = string("hx_5_cast_fp16")];
tensor<int32, [1]> hx_axes_0 = const()[name = string("hx_axes_0"), val = tensor<int32, [1]>([0])];
tensor<fp16, [1, 1, 640]> hx_cast_fp16 = expand_dims(axes = hx_axes_0, x = var_19_cast_fp16)[name = string("hx_cast_fp16")];
tensor<int32, [1]> var_59_batch_first_lstm_h0_squeeze_axes_0 = const()[name = string("op_59_batch_first_lstm_h0_squeeze_axes_0"), val = tensor<int32, [1]>([0])];
tensor<fp16, [1, 640]> var_59_batch_first_lstm_h0_squeeze_cast_fp16 = squeeze(axes = var_59_batch_first_lstm_h0_squeeze_axes_0, x = hx_5_cast_fp16)[name = string("op_59_batch_first_lstm_h0_squeeze_cast_fp16")];
tensor<int32, [1]> var_59_batch_first_lstm_c0_squeeze_axes_0 = const()[name = string("op_59_batch_first_lstm_c0_squeeze_axes_0"), val = tensor<int32, [1]>([0])];
tensor<fp16, [1, 640]> var_59_batch_first_lstm_c0_squeeze_cast_fp16 = squeeze(axes = var_59_batch_first_lstm_c0_squeeze_axes_0, x = hx_cast_fp16)[name = string("op_59_batch_first_lstm_c0_squeeze_cast_fp16")];
string var_59_batch_first_direction_0 = const()[name = string("op_59_batch_first_direction_0"), val = string("forward")];
bool var_59_batch_first_output_sequence_0 = const()[name = string("op_59_batch_first_output_sequence_0"), val = bool(true)];
string var_59_batch_first_recurrent_activation_0 = const()[name = string("op_59_batch_first_recurrent_activation_0"), val = string("sigmoid")];
string var_59_batch_first_cell_activation_0 = const()[name = string("op_59_batch_first_cell_activation_0"), val = string("tanh")];
string var_59_batch_first_activation_0 = const()[name = string("op_59_batch_first_activation_0"), val = string("tanh")];
tensor<fp16, [2560, 640]> concat_4_to_fp16 = const()[name = string("concat_4_to_fp16"), val = tensor<fp16, [2560, 640]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(6558976)))];
tensor<fp16, [2560, 640]> concat_5_to_fp16 = const()[name = string("concat_5_to_fp16"), val = tensor<fp16, [2560, 640]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(9835840)))];
tensor<fp16, [2560]> concat_3_to_fp16 = const()[name = string("concat_3_to_fp16"), val = tensor<fp16, [2560]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(13112704)))];
tensor<fp16, [1, 1, 640]> var_59_batch_first_cast_fp16_0, tensor<fp16, [1, 640]> var_59_batch_first_cast_fp16_1, tensor<fp16, [1, 640]> var_59_batch_first_cast_fp16_2 = lstm(activation = var_59_batch_first_activation_0, bias = concat_3_to_fp16, cell_activation = var_59_batch_first_cell_activation_0, direction = var_59_batch_first_direction_0, initial_c = var_59_batch_first_lstm_c0_squeeze_cast_fp16, initial_h = var_59_batch_first_lstm_h0_squeeze_cast_fp16, output_sequence = var_59_batch_first_output_sequence_0, recurrent_activation = var_59_batch_first_recurrent_activation_0, weight_hh = concat_5_to_fp16, weight_ih = concat_4_to_fp16, x = input_batch_first_cast_fp16_0)[name = string("op_59_batch_first_cast_fp16")];
tensor<int32, [3]> var_59_perm_0 = const()[name = string("op_59_perm_0"), val = tensor<int32, [3]>([1, 0, 2])];
tensor<int32, [1]> layer_h_axes_0 = const()[name = string("layer_h_axes_0"), val = tensor<int32, [1]>([0])];
tensor<fp16, [1, 1, 640]> layer_h_cast_fp16 = expand_dims(axes = layer_h_axes_0, x = var_59_batch_first_cast_fp16_1)[name = string("layer_h_cast_fp16")];
tensor<int32, [1]> layer_c_axes_0 = const()[name = string("layer_c_axes_0"), val = tensor<int32, [1]>([0])];
tensor<fp16, [1, 1, 640]> layer_c_cast_fp16 = expand_dims(axes = layer_c_axes_0, x = var_59_batch_first_cast_fp16_2)[name = string("layer_c_cast_fp16")];
tensor<int32, [1]> var_67_axes_0 = const()[name = string("op_67_axes_0"), val = tensor<int32, [1]>([0])];
tensor<fp16, [1, 640]> var_67_cast_fp16 = squeeze(axes = var_67_axes_0, x = layer_h_cast_fp16)[name = string("op_67_cast_fp16")];
tensor<int32, [1]> var_69_axes_0 = const()[name = string("op_69_axes_0"), val = tensor<int32, [1]>([0])];
tensor<fp16, [1, 640]> var_69_cast_fp16 = squeeze(axes = var_69_axes_0, x = layer_c_cast_fp16)[name = string("op_69_cast_fp16")];
int32 var_72_axis_0 = const()[name = string("op_72_axis_0"), val = int32(0)];
tensor<fp16, [2, 1, 640]> hidden = stack(axis = var_72_axis_0, values = (var_42_cast_fp16, var_67_cast_fp16))[name = string("op_72_cast_fp16")];
int32 var_75_axis_0 = const()[name = string("op_75_axis_0"), val = int32(0)];
tensor<fp16, [2, 1, 640]> cell = stack(axis = var_75_axis_0, values = (var_44_cast_fp16, var_69_cast_fp16))[name = string("op_75_cast_fp16")];
tensor<fp16, [1, 1, 640]> decoder_out = transpose(perm = var_59_perm_0, x = var_59_batch_first_cast_fp16_0)[name = string("transpose_0")];
} -> (decoder_out, hidden, cell);
}