From 5d8967eb8838f27f70f405c4ac2a7e8199229150 Mon Sep 17 00:00:00 2001
From: Aleksei Zinovev
Date: Tue, 9 May 2023 13:02:38 +0200
Subject: [PATCH] Made properties public (#538) (#553)

(cherry picked from commit b4450a34b67687c15d1351dea93176b42bf27843)
---
 .../kotlinx/dl/api/core/optimizer/AdaDelta.kt       |  6 +++---
 .../kotlinx/dl/api/core/optimizer/AdaGrad.kt        |  4 ++--
 .../kotlinx/dl/api/core/optimizer/AdaGradDA.kt      |  8 ++++----
 .../jetbrains/kotlinx/dl/api/core/optimizer/Adam.kt | 10 +++++-----
 .../kotlinx/dl/api/core/optimizer/Adamax.kt         |  8 ++++----
 .../jetbrains/kotlinx/dl/api/core/optimizer/Ftrl.kt | 12 ++++++------
 .../kotlinx/dl/api/core/optimizer/Momentum.kt       |  6 +++---
 .../kotlinx/dl/api/core/optimizer/RMSProp.kt        | 10 +++++-----
 .../jetbrains/kotlinx/dl/api/core/optimizer/SGD.kt  |  2 +-
 9 files changed, 33 insertions(+), 33 deletions(-)

diff --git a/tensorflow/src/main/kotlin/org/jetbrains/kotlinx/dl/api/core/optimizer/AdaDelta.kt b/tensorflow/src/main/kotlin/org/jetbrains/kotlinx/dl/api/core/optimizer/AdaDelta.kt
index a41302c33..c1fb91acc 100644
--- a/tensorflow/src/main/kotlin/org/jetbrains/kotlinx/dl/api/core/optimizer/AdaDelta.kt
+++ b/tensorflow/src/main/kotlin/org/jetbrains/kotlinx/dl/api/core/optimizer/AdaDelta.kt
@@ -46,9 +46,9 @@ private const val ACCUMULATOR_UPDATE = "accum_update"
  * @property [epsilon] Float >= 0. Fuzz factor.
  */
 public class AdaDelta(
-    private val learningRate: Float = 0.1f,
-    private val rho: Float = 0.95f,
-    private val epsilon: Float = 1e-8f,
+    public val learningRate: Float = 0.1f,
+    public val rho: Float = 0.95f,
+    public val epsilon: Float = 1e-8f,
     clipGradient: ClipGradientAction = NoClipGradient()
 ) : Optimizer(clipGradient) {
     private lateinit var epsilonConstant: Constant<Float>
diff --git a/tensorflow/src/main/kotlin/org/jetbrains/kotlinx/dl/api/core/optimizer/AdaGrad.kt b/tensorflow/src/main/kotlin/org/jetbrains/kotlinx/dl/api/core/optimizer/AdaGrad.kt
index cd6293b88..d4d69dc61 100644
--- a/tensorflow/src/main/kotlin/org/jetbrains/kotlinx/dl/api/core/optimizer/AdaGrad.kt
+++ b/tensorflow/src/main/kotlin/org/jetbrains/kotlinx/dl/api/core/optimizer/AdaGrad.kt
@@ -40,8 +40,8 @@ private const val ACCUMULATOR = "accumulator"
  * @property [initialAccumulatorValue] Decay: Float >= 0. Learning rate decay over each update.
  */
 public class AdaGrad(
-    private val learningRate: Float = 0.1f,
-    private val initialAccumulatorValue: Float = 0.01f,
+    public val learningRate: Float = 0.1f,
+    public val initialAccumulatorValue: Float = 0.01f,
     clipGradient: ClipGradientAction = NoClipGradient()
 ) : Optimizer(clipGradient) {
     private lateinit var initialAccumulatorValueConstant: Constant<Float>
diff --git a/tensorflow/src/main/kotlin/org/jetbrains/kotlinx/dl/api/core/optimizer/AdaGradDA.kt b/tensorflow/src/main/kotlin/org/jetbrains/kotlinx/dl/api/core/optimizer/AdaGradDA.kt
index 310de4987..8d2521d4b 100644
--- a/tensorflow/src/main/kotlin/org/jetbrains/kotlinx/dl/api/core/optimizer/AdaGradDA.kt
+++ b/tensorflow/src/main/kotlin/org/jetbrains/kotlinx/dl/api/core/optimizer/AdaGradDA.kt
@@ -46,10 +46,10 @@ private const val SQUARED_ACCUMULATOR = "gradient_squared_accumulator"
  * @property [l2Strength] A float value, must be greater than or equal to zero.
  */
 public class AdaGradDA(
-    private val learningRate: Float = 0.1f,
-    private val initialAccumulatorValue: Float = 0.01f,
-    private val l1Strength: Float = 0.01f,
-    private val l2Strength: Float = 0.01f,
+    public val learningRate: Float = 0.1f,
+    public val initialAccumulatorValue: Float = 0.01f,
+    public val l1Strength: Float = 0.01f,
+    public val l2Strength: Float = 0.01f,
     clipGradient: ClipGradientAction = NoClipGradient()
 ) : Optimizer(clipGradient) {
     private lateinit var learningRateConst: Constant<Float>
diff --git a/tensorflow/src/main/kotlin/org/jetbrains/kotlinx/dl/api/core/optimizer/Adam.kt b/tensorflow/src/main/kotlin/org/jetbrains/kotlinx/dl/api/core/optimizer/Adam.kt
index b8a05f248..7481b9f8d 100644
--- a/tensorflow/src/main/kotlin/org/jetbrains/kotlinx/dl/api/core/optimizer/Adam.kt
+++ b/tensorflow/src/main/kotlin/org/jetbrains/kotlinx/dl/api/core/optimizer/Adam.kt
@@ -45,11 +45,11 @@ private val SECOND_BETA_POWER_NAME = defaultOptimizerVariableName("beta2_power")
  * @property [epsilon] Float >= 0. Fuzz factor.
  */
 public class Adam(
-    private val learningRate: Float = 0.001f,
-    private val beta1: Float = 0.9f,
-    private val beta2: Float = 0.999f,
-    private val epsilon: Float = 1e-07f,
-    private val useNesterov: Boolean = false,
+    public val learningRate: Float = 0.001f,
+    public val beta1: Float = 0.9f,
+    public val beta2: Float = 0.999f,
+    public val epsilon: Float = 1e-07f,
+    public val useNesterov: Boolean = false,
     clipGradient: ClipGradientAction = NoClipGradient()
 ) : Optimizer(clipGradient) {
 
diff --git a/tensorflow/src/main/kotlin/org/jetbrains/kotlinx/dl/api/core/optimizer/Adamax.kt b/tensorflow/src/main/kotlin/org/jetbrains/kotlinx/dl/api/core/optimizer/Adamax.kt
index 33954e229..137885630 100644
--- a/tensorflow/src/main/kotlin/org/jetbrains/kotlinx/dl/api/core/optimizer/Adamax.kt
+++ b/tensorflow/src/main/kotlin/org/jetbrains/kotlinx/dl/api/core/optimizer/Adamax.kt
@@ -46,10 +46,10 @@ private val FIRST_BETA_POWER_NAME = defaultOptimizerVariableName("beta1_power")
  * @property [epsilon] Float >= 0. Fuzz factor.
  */
 public class Adamax(
-    private val learningRate: Float = 0.001f,
-    private val beta1: Float = 0.9f,
-    private val beta2: Float = 0.999f,
-    private val epsilon: Float = 1e-07f,
+    public val learningRate: Float = 0.001f,
+    public val beta1: Float = 0.9f,
+    public val beta2: Float = 0.999f,
+    public val epsilon: Float = 1e-07f,
     clipGradient: ClipGradientAction = NoClipGradient()
 ) : Optimizer(clipGradient) {
 
diff --git a/tensorflow/src/main/kotlin/org/jetbrains/kotlinx/dl/api/core/optimizer/Ftrl.kt b/tensorflow/src/main/kotlin/org/jetbrains/kotlinx/dl/api/core/optimizer/Ftrl.kt
index 23eb07204..b416a5485 100644
--- a/tensorflow/src/main/kotlin/org/jetbrains/kotlinx/dl/api/core/optimizer/Ftrl.kt
+++ b/tensorflow/src/main/kotlin/org/jetbrains/kotlinx/dl/api/core/optimizer/Ftrl.kt
@@ -52,12 +52,12 @@ private const val LINEAR_ACCUMULATOR = "linear_accumulator"
  * When input is sparse shrinkage will only happen on the active weights.
  */
 public class Ftrl(
-    private val learningRate: Float = 0.001f,
-    private val l1RegularizationStrength: Float = 0.0f,
-    private val l2RegularizationStrength: Float = 0.0f,
-    private val learningRatePower: Float = -0.5f,
-    private val l2ShrinkageRegularizationStrength: Float = 0.0f,
-    private var initialAccumulatorValue: Float = 0.0f,
+    public val learningRate: Float = 0.001f,
+    public val l1RegularizationStrength: Float = 0.0f,
+    public val l2RegularizationStrength: Float = 0.0f,
+    public val learningRatePower: Float = -0.5f,
+    public val l2ShrinkageRegularizationStrength: Float = 0.0f,
+    public var initialAccumulatorValue: Float = 0.0f,
     clipGradient: ClipGradientAction = NoClipGradient()
 ) : Optimizer(clipGradient) {
     /** */
diff --git a/tensorflow/src/main/kotlin/org/jetbrains/kotlinx/dl/api/core/optimizer/Momentum.kt b/tensorflow/src/main/kotlin/org/jetbrains/kotlinx/dl/api/core/optimizer/Momentum.kt
index 34dd2b934..272bc1107 100644
--- a/tensorflow/src/main/kotlin/org/jetbrains/kotlinx/dl/api/core/optimizer/Momentum.kt
+++ b/tensorflow/src/main/kotlin/org/jetbrains/kotlinx/dl/api/core/optimizer/Momentum.kt
@@ -25,9 +25,9 @@ private const val MOMENTUM = "momentum"
  * @property [useNesterov] If true, applies Nesterov momentum.
  */
 public class Momentum(
-    private val learningRate: Float = 0.001f,
-    private val momentum: Float = 0.99f,
-    private val useNesterov: Boolean = true,
+    public val learningRate: Float = 0.001f,
+    public val momentum: Float = 0.99f,
+    public val useNesterov: Boolean = true,
     clipGradient: ClipGradientAction = NoClipGradient()
 ) : Optimizer(clipGradient) {
     private lateinit var momentumConst: Constant<Float>
diff --git a/tensorflow/src/main/kotlin/org/jetbrains/kotlinx/dl/api/core/optimizer/RMSProp.kt b/tensorflow/src/main/kotlin/org/jetbrains/kotlinx/dl/api/core/optimizer/RMSProp.kt
index 5fea565ee..a02377dfd 100644
--- a/tensorflow/src/main/kotlin/org/jetbrains/kotlinx/dl/api/core/optimizer/RMSProp.kt
+++ b/tensorflow/src/main/kotlin/org/jetbrains/kotlinx/dl/api/core/optimizer/RMSProp.kt
@@ -31,11 +31,11 @@ private const val MOMENTUM = "momentum"
  * @property [centered] Centered or not.
  */
 public class RMSProp(
-    private val learningRate: Float = 0.001f,
-    private val decay: Float = 0.9f,
-    private val momentum: Float = 0.0f,
-    private val epsilon: Float = 1e-10f,
-    private val centered: Boolean = false,
+    public val learningRate: Float = 0.001f,
+    public val decay: Float = 0.9f,
+    public val momentum: Float = 0.0f,
+    public val epsilon: Float = 1e-10f,
+    public val centered: Boolean = false,
     clipGradient: ClipGradientAction = NoClipGradient()
 ) : Optimizer(clipGradient) {
 
diff --git a/tensorflow/src/main/kotlin/org/jetbrains/kotlinx/dl/api/core/optimizer/SGD.kt b/tensorflow/src/main/kotlin/org/jetbrains/kotlinx/dl/api/core/optimizer/SGD.kt
index fc92e00a7..d804801f0 100644
--- a/tensorflow/src/main/kotlin/org/jetbrains/kotlinx/dl/api/core/optimizer/SGD.kt
+++ b/tensorflow/src/main/kotlin/org/jetbrains/kotlinx/dl/api/core/optimizer/SGD.kt
@@ -19,7 +19,7 @@ import org.tensorflow.op.train.ApplyGradientDescent
  * NOTE: It's not an equivalent for `keras.sgd`, it is a pure SGD with simple 'variable' update by subtracting 'alpha' * 'delta' from it.
  */
 public class SGD(
-    private var learningRate: Float = 0.2f,
+    public var learningRate: Float = 0.2f,
     clipGradient: ClipGradientAction = NoClipGradient()
 ) : Optimizer(clipGradient) {
 
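
Not part of the patch itself: a minimal usage sketch of what the visibility change enables. The constructor arguments shown in the diff (for example Adam's learningRate, beta1, beta2, useNesterov) were previously private constructor parameters; with this change they are public properties that can be read back from a constructed optimizer. The snippet assumes the kotlinx-dl tensorflow module is on the classpath.

import org.jetbrains.kotlinx.dl.api.core.optimizer.Adam

fun main() {
    // Construction is unchanged by this patch; only the visibility of the
    // constructor properties differs.
    val optimizer = Adam(learningRate = 0.001f, beta1 = 0.9f, beta2 = 0.999f)

    // With the properties public, the configured hyperparameters can be
    // inspected after construction (before this change the lines below
    // would not compile).
    println("learningRate = ${optimizer.learningRate}")
    println("beta1 = ${optimizer.beta1}, beta2 = ${optimizer.beta2}")
    println("useNesterov = ${optimizer.useNesterov}")
}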