21 | 21 | from paddle.base.framework import in_dynamic_or_pir_mode |
22 | 22 | from paddle.utils.decorator_utils import ( |
23 | 23 | ParamAliasDecorator, |
24 | | - legacy_reduction_guard, |
25 | | - legacy_reduction_special_guard, |
| 24 | + legacy_reduction_decorator, |
| 25 | + legacy_reduction_special_decorator, |
26 | 26 | ) |
27 | 27 |
28 | 28 | from .. import functional as F |
@@ -125,7 +125,7 @@ class BCEWithLogitsLoss(Layer): |
125 | 125 | pos_weight: Tensor | None |
126 | 126 | name: str | None |
127 | 127 |
128 | | - @legacy_reduction_guard |
| 128 | + @legacy_reduction_decorator |
129 | 129 | def __init__( |
130 | 130 | self, |
131 | 131 | weight: Tensor | None = None, |
@@ -423,7 +423,7 @@ class CrossEntropyLoss(Layer): |
423 | 423 | label_smoothing: float |
424 | 424 | name: str | None |
425 | 425 |
426 | | - @legacy_reduction_special_guard |
| 426 | + @legacy_reduction_special_decorator |
427 | 427 | def __init__( |
428 | 428 | self, |
429 | 429 | weight: Tensor | None = None, |
@@ -662,7 +662,7 @@ class MSELoss(Layer): |
662 | 662 |
663 | 663 | reduction: _ReduceMode |
664 | 664 |
665 | | - @legacy_reduction_guard |
| 665 | + @legacy_reduction_decorator |
666 | 666 | def __init__(self, reduction: _ReduceMode = 'mean'): |
667 | 667 | super().__init__() |
668 | 668 | if reduction not in ['sum', 'mean', 'none']: |
@@ -766,7 +766,7 @@ class L1Loss(Layer): |
766 | 766 | reduction: _ReduceMode |
767 | 767 | name: str | None |
768 | 768 |
769 | | - @legacy_reduction_guard |
| 769 | + @legacy_reduction_decorator |
770 | 770 | def __init__( |
771 | 771 | self, reduction: _ReduceMode = 'mean', name: str | None = None |
772 | 772 | ) -> None: |
@@ -857,7 +857,7 @@ class BCELoss(Layer): |
857 | 857 | reduction: _ReduceMode |
858 | 858 | name: str | None |
859 | 859 |
860 | | - @legacy_reduction_guard |
| 860 | + @legacy_reduction_decorator |
861 | 861 | def __init__( |
862 | 862 | self, |
863 | 863 | weight: Tensor | None = None, |
@@ -970,7 +970,7 @@ class NLLLoss(Layer): |
970 | 970 |
971 | 971 | """ |
972 | 972 |
973 | | - @legacy_reduction_guard |
| 973 | + @legacy_reduction_decorator |
974 | 974 | def __init__( |
975 | 975 | self, |
976 | 976 | weight: Tensor | None = None, |
@@ -1059,7 +1059,7 @@ class PoissonNLLLoss(Layer): |
1059 | 1059 |
1060 | 1060 | """ |
1061 | 1061 |
1062 | | - @legacy_reduction_guard |
| 1062 | + @legacy_reduction_decorator |
1063 | 1063 | def __init__( |
1064 | 1064 | self, |
1065 | 1065 | log_input: bool = True, |
@@ -1191,7 +1191,7 @@ class KLDivLoss(Layer): |
1191 | 1191 | reduction: _ReduceMode |
1192 | 1192 | log_target: bool |
1193 | 1193 |
1194 | | - @legacy_reduction_special_guard |
| 1194 | + @legacy_reduction_special_decorator |
1195 | 1195 | def __init__( |
1196 | 1196 | self, reduction: _ReduceMode = 'mean', log_target: bool = False |
1197 | 1197 | ) -> None: |
@@ -1264,7 +1264,7 @@ class MarginRankingLoss(Layer): |
1264 | 1264 | reduction: _ReduceMode |
1265 | 1265 | name: str | None |
1266 | 1266 |
1267 | | - @legacy_reduction_guard |
| 1267 | + @legacy_reduction_decorator |
1268 | 1268 | def __init__( |
1269 | 1269 | self, |
1270 | 1270 | margin: float = 0.0, |
@@ -1538,7 +1538,7 @@ class SmoothL1Loss(Layer): |
1538 | 1538 | delta: float |
1539 | 1539 | name: str | None |
1540 | 1540 |
1541 | | - @legacy_reduction_guard |
| 1541 | + @legacy_reduction_decorator |
1542 | 1542 | def __init__( |
1543 | 1543 | self, |
1544 | 1544 | reduction: _ReduceMode = 'mean', |
@@ -1628,7 +1628,7 @@ class MultiLabelSoftMarginLoss(Layer): |
1628 | 1628 | reduction: _ReduceMode |
1629 | 1629 | name: str | None |
1630 | 1630 |
1631 | | - @legacy_reduction_guard |
| 1631 | + @legacy_reduction_decorator |
1632 | 1632 | def __init__( |
1633 | 1633 | self, |
1634 | 1634 | weight: Tensor | None = None, |
@@ -1741,7 +1741,7 @@ class HingeEmbeddingLoss(Layer): |
1741 | 1741 | reduction: _ReduceMode |
1742 | 1742 | name: str | None |
1743 | 1743 |
1744 | | - @legacy_reduction_guard |
| 1744 | + @legacy_reduction_decorator |
1745 | 1745 | def __init__( |
1746 | 1746 | self, |
1747 | 1747 | margin: float = 1.0, |
@@ -1840,7 +1840,7 @@ class CosineEmbeddingLoss(Layer): |
1840 | 1840 | reduction: _ReduceMode |
1841 | 1841 | name: str | None |
1842 | 1842 |
1843 | | - @legacy_reduction_guard |
| 1843 | + @legacy_reduction_decorator |
1844 | 1844 | def __init__( |
1845 | 1845 | self, |
1846 | 1846 | margin: float = 0, |
@@ -2078,7 +2078,7 @@ class TripletMarginLoss(Layer): |
2078 | 2078 | reduction: _ReduceMode |
2079 | 2079 | name: str | None |
2080 | 2080 |
2081 | | - @legacy_reduction_guard |
| 2081 | + @legacy_reduction_decorator |
2082 | 2082 | def __init__( |
2083 | 2083 | self, |
2084 | 2084 | margin: float = 1.0, |
@@ -2195,7 +2195,7 @@ class MultiMarginLoss(Layer): |
2195 | 2195 | reduction: _ReduceMode |
2196 | 2196 | name: str | None |
2197 | 2197 |
2198 | | - @legacy_reduction_guard |
| 2198 | + @legacy_reduction_decorator |
2199 | 2199 | def __init__( |
2200 | 2200 | self, |
2201 | 2201 | p: int = 1, |
@@ -2291,7 +2291,7 @@ class MultiLabelMarginLoss(Layer): |
2291 | 2291 | reduction: _ReduceMode |
2292 | 2292 | name: str | None |
2293 | 2293 |
2294 | | - @legacy_reduction_guard |
| 2294 | + @legacy_reduction_decorator |
2295 | 2295 | def __init__( |
2296 | 2296 | self, |
2297 | 2297 | reduction: _ReduceMode = 'mean', |
@@ -2380,7 +2380,7 @@ class SoftMarginLoss(Layer): |
2380 | 2380 | reduction: _ReduceMode |
2381 | 2381 | name: str | None |
2382 | 2382 |
2383 | | - @legacy_reduction_guard |
| 2383 | + @legacy_reduction_decorator |
2384 | 2384 | def __init__( |
2385 | 2385 | self, reduction: _ReduceMode = 'mean', name: str | None = None |
2386 | 2386 | ) -> None: |