Skip to content

Commit 71ca43b

Browse files
authored
Revert an op version change (#3026)
Revert an op version change (LayerNormalization SinceVersion 1 → 9); it was inadvertently introduced by #2999.
1 parent cc8adc8 commit 71ca43b

File tree

8 files changed

+19
-19
lines changed

8 files changed

+19
-19
lines changed

onnxruntime/contrib_ops/cpu/layer_norm.cc

+1-1
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,7 @@ namespace contrib {
1515
ONNX_OPERATOR_TYPED_KERNEL_EX( \
1616
LayerNormalization, \
1717
kOnnxDomain, \
18-
9, \
18+
1, \
1919
T, \
2020
kCpuExecutionProvider, \
2121
KernelDefBuilder() \

onnxruntime/contrib_ops/cpu_contrib_kernels.cc

+4-4
Original file line numberDiff line numberDiff line change
@@ -67,8 +67,8 @@ class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kMSNchwcDomai
6767
class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kMSNchwcDomain, 1, float, GlobalMaxPool);
6868
class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kMSNchwcDomain, 1, float, AveragePool);
6969
class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kMSNchwcDomain, 1, float, GlobalAveragePool);
70-
class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 9, float, LayerNormalization);
71-
class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 9, double, LayerNormalization);
70+
class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 1, float, LayerNormalization);
71+
class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 1, double, LayerNormalization);
7272
class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kMSDomain, 1, float, SkipLayerNormalization);
7373
class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kMSDomain, 1, double, SkipLayerNormalization);
7474

@@ -141,8 +141,8 @@ Status RegisterCpuContribKernels(KernelRegistry& kernel_registry) {
141141
BuildKernelCreateInfo<ONNX_OPERATOR_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 1, ScaledTanh)>,
142142
BuildKernelCreateInfo<ONNX_OPERATOR_VERSIONED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 1, 9, ThresholdedRelu)>,
143143
BuildKernelCreateInfo<ONNX_OPERATOR_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 1, Scale)>,
144-
BuildKernelCreateInfo<ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 9, float, LayerNormalization)>,
145-
BuildKernelCreateInfo<ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 9, double, LayerNormalization)>,
144+
BuildKernelCreateInfo<ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 1, float, LayerNormalization)>,
145+
BuildKernelCreateInfo<ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 1, double, LayerNormalization)>,
146146
BuildKernelCreateInfo<ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kMSDomain, 1, float, SkipLayerNormalization)>,
147147
BuildKernelCreateInfo<ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kMSDomain, 1, double, SkipLayerNormalization)>,
148148
};

onnxruntime/contrib_ops/cuda/layer_norm.cc

+1-1
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,7 @@ namespace cuda {
1515
ONNX_OPERATOR_TYPED_KERNEL_EX( \
1616
LayerNormalization, \
1717
kOnnxDomain, \
18-
9, \
18+
1, \
1919
T##_##U, \
2020
kCudaExecutionProvider, \
2121
KernelDefBuilder() \

onnxruntime/contrib_ops/cuda_contrib_kernels.cc

+6-6
Original file line numberDiff line numberDiff line change
@@ -47,9 +47,9 @@ class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCudaExecutionProvider, kMSDomain, 1
4747
class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCudaExecutionProvider, kOnnxDomain, 1, float, ThresholdedRelu);
4848
class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCudaExecutionProvider, kOnnxDomain, 1, double, ThresholdedRelu);
4949
class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCudaExecutionProvider, kOnnxDomain, 1, MLFloat16, ThresholdedRelu);
50-
class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCudaExecutionProvider, kOnnxDomain, 9, float_float, LayerNormalization);
51-
class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCudaExecutionProvider, kOnnxDomain, 9, double_float, LayerNormalization);
52-
class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCudaExecutionProvider, kOnnxDomain, 9, MLFloat16_float, LayerNormalization);
50+
class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCudaExecutionProvider, kOnnxDomain, 1, float_float, LayerNormalization);
51+
class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCudaExecutionProvider, kOnnxDomain, 1, double_float, LayerNormalization);
52+
class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCudaExecutionProvider, kOnnxDomain, 1, MLFloat16_float, LayerNormalization);
5353

5454
void RegisterCudaContribKernels(KernelRegistry& kernel_registry) {
5555
static const BuildKernelCreateInfoFn function_table[] = {
@@ -91,9 +91,9 @@ void RegisterCudaContribKernels(KernelRegistry& kernel_registry) {
9191
BuildKernelCreateInfo<ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCudaExecutionProvider, kOnnxDomain, 1, float, ThresholdedRelu)>,
9292
BuildKernelCreateInfo<ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCudaExecutionProvider, kOnnxDomain, 1, double, ThresholdedRelu)>,
9393
BuildKernelCreateInfo<ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCudaExecutionProvider, kOnnxDomain, 1, MLFloat16, ThresholdedRelu)>,
94-
BuildKernelCreateInfo<ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCudaExecutionProvider, kOnnxDomain, 9, float_float, LayerNormalization)>,
95-
BuildKernelCreateInfo<ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCudaExecutionProvider, kOnnxDomain, 9, double_float, LayerNormalization)>,
96-
BuildKernelCreateInfo<ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCudaExecutionProvider, kOnnxDomain, 9, MLFloat16_float, LayerNormalization)>};
94+
BuildKernelCreateInfo<ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCudaExecutionProvider, kOnnxDomain, 1, float_float, LayerNormalization)>,
95+
BuildKernelCreateInfo<ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCudaExecutionProvider, kOnnxDomain, 1, double_float, LayerNormalization)>,
96+
BuildKernelCreateInfo<ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCudaExecutionProvider, kOnnxDomain, 1, MLFloat16_float, LayerNormalization)>};
9797

9898
for (auto& function_table_entry : function_table) {
9999
kernel_registry.Register(function_table_entry());

onnxruntime/core/graph/contrib_ops/contrib_defs.cc

+1-1
Original file line numberDiff line numberDiff line change
@@ -2387,7 +2387,7 @@ Example 4:
23872387

23882388
ONNX_CONTRIB_OPERATOR_SCHEMA(LayerNormalization)
23892389
.SetDomain(kOnnxDomain)
2390-
.SinceVersion(9)
2390+
.SinceVersion(1)
23912391
.SetSupportLevel(OpSchema::SupportType::EXPERIMENTAL)
23922392
.SetDoc("LayerNormalization")
23932393
.Attr("axis",

onnxruntime/core/optimizer/attention_fusion.cc

+4-4
Original file line numberDiff line numberDiff line change
@@ -280,7 +280,7 @@ Status AttentionFusion::ApplyImpl(Graph& graph, bool& modified, int graph_level,
280280
ORT_RETURN_IF_ERROR(Recurse(node, modified, graph_level, logger));
281281

282282
if (node.GetOutputEdgesCount() == 4 &&
283-
graph_utils::IsSupportedOptypeVersionAndDomain(node, "LayerNormalization", {9}, kOnnxDomain) &&
283+
graph_utils::IsSupportedOptypeVersionAndDomain(node, "LayerNormalization", {1}, kOnnxDomain) &&
284284
graph_utils::IsSupportedProvider(node, GetCompatibleExecutionProviders())) {
285285
// Get hidden size from layer norm bias tensor shape.
286286
const NodeArg& layer_norm_bias = *(node.InputDefs()[2]);
@@ -389,7 +389,7 @@ bool AttentionFusion::FuseSubGraph(Node& layer_norm, const Node& add_after_layer
389389
{0, 0, "Reshape", {5}, kOnnxDomain},
390390
{0, 0, "Add", {7}, kOnnxDomain},
391391
{0, 0, "MatMul", {1, 9}, kOnnxDomain},
392-
{0, 0, "LayerNormalization", {9}, kOnnxDomain}};
392+
{0, 0, "LayerNormalization", {1}, kOnnxDomain}};
393393

394394
std::vector<const Node::EdgeEnd*> edges;
395395
if (!graph_utils::FindPath(add_after_layer_norm, true, parent_path, edges, logger)) {
@@ -532,7 +532,7 @@ bool AttentionFusion::FuseSubGraph(Node& layer_norm, const Node& add_after_layer
532532
{0, 0, "Reshape", {5}, kOnnxDomain},
533533
{0, 0, "Add", {7}, kOnnxDomain},
534534
{0, 0, "MatMul", {1, 9}, kOnnxDomain},
535-
{0, 0, "LayerNormalization", {9}, kOnnxDomain}};
535+
{0, 0, "LayerNormalization", {1}, kOnnxDomain}};
536536

537537
if (!graph_utils::FindPath(mask_add, true, q_path, edges, logger)) {
538538
DEBUG_LOG("Failed to find path for q");
@@ -583,7 +583,7 @@ bool AttentionFusion::FuseSubGraph(Node& layer_norm, const Node& add_after_layer
583583
{0, 0, "Reshape", {5}, kOnnxDomain},
584584
{0, 0, "Add", {7}, kOnnxDomain},
585585
{0, 0, "MatMul", {1, 9}, kOnnxDomain},
586-
{0, 0, "LayerNormalization", {9}, kOnnxDomain}};
586+
{0, 0, "LayerNormalization", {1}, kOnnxDomain}};
587587

588588
if (!graph_utils::FindPath(qk_matmul, true, k_path, edges, logger)) {
589589
DEBUG_LOG("Failed to find path for k");

onnxruntime/core/optimizer/embed_layer_norm_fusion.cc

+1-1
Original file line numberDiff line numberDiff line change
@@ -498,7 +498,7 @@ Status EmbedLayerNormFusion::ApplyImpl(Graph& graph, bool& modified, int graph_l
498498

499499
Node& layer_norm_node = *p_layer_norm;
500500
ORT_RETURN_IF_ERROR(Recurse(layer_norm_node, modified, graph_level, logger));
501-
if (!graph_utils::IsSupportedOptypeVersionAndDomain(layer_norm_node, "LayerNormalization", {9}, kOnnxDomain) ||
501+
if (!graph_utils::IsSupportedOptypeVersionAndDomain(layer_norm_node, "LayerNormalization", {1}, kOnnxDomain) ||
502502
!graph_utils::IsSupportedProvider(layer_norm_node, GetCompatibleExecutionProviders())) {
503503
continue;
504504
}

onnxruntime/core/optimizer/skip_layer_norm_fusion.cc

+1-1
Original file line numberDiff line numberDiff line change
@@ -139,7 +139,7 @@ Status SkipLayerNormFusion::ApplyImpl(Graph& graph, bool& modified, int graph_le
139139
Node& ln_node = *p_layernorm;
140140
ORT_RETURN_IF_ERROR(Recurse(ln_node, modified, graph_level, logger));
141141

142-
if (!graph_utils::IsSupportedOptypeVersionAndDomain(ln_node, "LayerNormalization", {9}) ||
142+
if (!graph_utils::IsSupportedOptypeVersionAndDomain(ln_node, "LayerNormalization", {1}) ||
143143
!graph_utils::IsSupportedProvider(ln_node, GetCompatibleExecutionProviders()) ||
144144
!IsSupportedDataType(ln_node)) {
145145
continue;

0 commit comments

Comments
 (0)