Skip to main content
change to case class
Source Link
 trait LagrangeAlgorithOOP {

       val settings: SettingsLagrange
       val problem: ConstrainedProblem
       val innerMinimiser: Minimiser
       val penaltiesFunction: ConstraintPenaltiesFunction

       def iteration( s: StateLagrange):  Either[String, StateLagrange]
       def lagrangeFunction( lams: Lambdas, pens: Penalties): AugmentedLagrangianFunction
       def estimateLambdas( pens: Penalties, las: Lambdas, cons: ConstraintValues): Option[Lambdas]
       def updatePenaltiesHistory( h: HistoryLagrange): HistoryLagrange
     }   
 trait LagrangeAlgorithOOP {

       val settings: SettingsLagrange
       val problem: ConstrainedProblem
       val innerMinimiser: Minimiser
       val penaltiesFunction: ConstraintPenaltiesFunction

       def iteration( s: StateLagrange):  Either[String, StateLagrange]
       def lagrangeFunction( lams: Lambdas, pens: Penalties): AugmentedLagrangianFunction
       def estimateLambdas( pens: Penalties, las: Lambdas, cons: ConstraintValues): Option[Lambdas]
       def updatePenaltiesHistory( h: HistoryLagrange): HistoryLagrange
     }   
 /** Pure data holder for the FP-style refactoring: groups the immutable
  * values that every algorithm function reads but never modifies
  * (per the question text: algorithmic settings, problem specification
  * and dependencies on helpers).
  */
 trait LagrangeData {

  // Algorithmic settings (type declared elsewhere in the project).
  val settings: SettingsLagrange
  // Specification of the constrained optimisation problem being solved.
  val problem: ConstrainedProblem
  // Helper dependency — presumably the minimiser for the inner subproblem.
  val innerMinimiser: Minimiser
  // Helper dependency — presumably maps constraint violations to penalty terms.
  val penaltiesFunction: ConstraintPenaltiesFunction
}

/** FP-style counterpart of the OOP trait: stateless operations that take the
  * shared immutable data explicitly as a `LagrangeData` parameter instead of
  * reading it from mixed-in vals.
  */
trait LagrangeAlgorithmFUN {

  // Presumably one outer-loop step; the Either's Left (a String) looks like an
  // error description, Right the next state — confirm against implementations.
  def iteration(d: LagrangeData, s: StateLagrange):  Either[String, StateLagrange]
  // NOTE(review): returns Lagrangian here but AugmentedLagrangianFunction in
  // the OOP version — confirm which type is intended.
  def lagrangeFunction(d: LagrangeData, lams: Lambdas, pens: Penalties): Lagrangian
  // New multiplier estimates from penalties, previous multipliers and
  // constraint values; None when no estimate is produced.
  def estimateLambdas(d: LagrangeData, pens: Penalties, old: Lambdas, cons: ConstraintValues): Option[Lambdas]
  // Pure update: returns a new history value rather than mutating state.
  def updatePenaltiesHistory(d: LagrangeData,  h: HistoryLagrange): HistoryLagrange
}
/** Concrete data carrier for the FP refactoring (the revision titled
  * "change to case class"): the same four immutable fields the OOP trait
  * mixes in, passed explicitly to each algorithm function.
  */
case class LagrangeData(settings: SettingsLagrange,
                        problem: ConstrainedProblem,
                        innerMinimiser: Minimiser,
                        penaltiesFunction: ConstraintPenaltiesFunction)
                                                                    

trait LagrangeAlgorithmFUN {

  def iteration(d: LagrangeData, s: StateLagrange):  Either[String, StateLagrange]
  def lagrangeFunction(d: LagrangeData, lams: Lambdas, pens: Penalties): Lagrangian
  def estimateLambdas(d: LagrangeData, pens: Penalties, old: Lambdas, cons: ConstraintValues): Option[Lambdas]
  def updatePenaltiesHistory(d: LagrangeData,  h: HistoryLagrange): HistoryLagrange
}
  1. What is the best refactoring of this algorithm to an FP style?
  2. Could some half-way approach work better: eg, to leave some of the data fields in the trait?
  3. Is there much difference between the original and refactored versions?
  4. What is the benefit of refactoring to FP-style in this case?
 trait LagrangeAlgorithOOP {

       val settings: SettingsLagrange
       val problem: ConstrainedProblem
       val innerMinimiser: Minimiser
       val penaltiesFunction: ConstraintPenaltiesFunction

       def iteration( s: StateLagrange):  Either[String, StateLagrange]
       def lagrangeFunction( lams: Lambdas, pens: Penalties): AugmentedLagrangianFunction
       def estimateLambdas( pens: Penalties, las: Lambdas, cons: ConstraintValues): Option[Lambdas]
       def updatePenaltiesHistory( h: HistoryLagrange): HistoryLagrange
     }   
 trait LagrangeData {

  val settings: SettingsLagrange
  val problem: ConstrainedProblem
  val innerMinimiser: Minimiser
  val penaltiesFunction: ConstraintPenaltiesFunction
}

trait LagrangeAlgorithmFUN {

  def iteration(d: LagrangeData, s: StateLagrange):  Either[String, StateLagrange]
  def lagrangeFunction(d: LagrangeData, lams: Lambdas, pens: Penalties): Lagrangian
  def estimateLambdas(d: LagrangeData, pens: Penalties, old: Lambdas, cons: ConstraintValues): Option[Lambdas]
  def updatePenaltiesHistory(d: LagrangeData,  h: HistoryLagrange): HistoryLagrange
}
  1. What is the best refactoring of this algorithm to an FP style?
  2. Could some half-way approach work better: eg, to leave some of the data fields in the trait?
  3. Is there much difference between the two versions?
  4. What is the benefit of refactoring to FP-style in this case?
 trait LagrangeAlgorithOOP {

       val settings: SettingsLagrange
       val problem: ConstrainedProblem
       val innerMinimiser: Minimiser
       val penaltiesFunction: ConstraintPenaltiesFunction

       def iteration( s: StateLagrange):  Either[String, StateLagrange]
       def lagrangeFunction( lams: Lambdas, pens: Penalties): AugmentedLagrangianFunction
       def estimateLambdas( pens: Penalties, las: Lambdas, cons: ConstraintValues): Option[Lambdas]
       def updatePenaltiesHistory( h: HistoryLagrange): HistoryLagrange
     }   
case class LagrangeData(settings: SettingsLagrange,
                        problem: ConstrainedProblem,
                        innerMinimiser: Minimiser,
                        penaltiesFunction: ConstraintPenaltiesFunction)
                                                                    

trait LagrangeAlgorithmFUN {

  def iteration(d: LagrangeData, s: StateLagrange):  Either[String, StateLagrange]
  def lagrangeFunction(d: LagrangeData, lams: Lambdas, pens: Penalties): Lagrangian
  def estimateLambdas(d: LagrangeData, pens: Penalties, old: Lambdas, cons: ConstraintValues): Option[Lambdas]
  def updatePenaltiesHistory(d: LagrangeData,  h: HistoryLagrange): HistoryLagrange
}
  1. What is the best refactoring of this algorithm to an FP style?
  2. Could some half-way approach work better: eg, to leave some of the data fields in the trait?
  3. Is there much difference between the original and refactored versions?
  4. What is the benefit of refactoring to FP-style in this case?
added 54 characters in body
Source Link

Functional programming strongly suggests separating data from behaviours (functions). However, I can't see the benefit of this for an algorithm's implementation that is intrinsically tied to particular settings data.

For example, suppose there's a trait LagrangeAlgorithmOOP with immutable data being the algorithmic settings, problem specification and dependencies on helpers. The trait's methods all use this data to find a problem solution. Their implementation is specific to the algorithm's type. Almost none of them would make sense as a stand-alone function.

Specifically, suppose we refactor

 trait LagrangeAlgorithOOP {

       val settings: SettingsLagrange
       val problem: ConstrainedProblem
       val innerMinimiser: Minimiser
       val penaltiesFunction: ConstraintPenaltiesFunction

       def iteration( s: StateLagrange):  Either[String, StateLagrange]
       def lagrangeFunction( lams: Lambdas, pens: Penalties): AugmentedLagrangianFunction
       def estimateLambdas( pens: Penalties, las: Lambdas, cons: ConstraintValues): Option[Lambdas]
       def updatePenaltiesHistory( h: HistoryLagrange): HistoryLagrange
 
     }   

into this

 trait LagrangeData {
  
  val settings: SettingsLagrange
  val problem: ConstrainedProblem
  val innerMinimiser: Minimiser
  val penaltiesFunction: ConstraintPenaltiesFunction
}

trait LagrangeAlgorithmFUN {

  def iteration(d: LagrangeData, s: StateLagrange):  Either[String, StateLagrange]
  def lagrangeFunction(d: LagrangeData, lams: Lambdas, pens: Penalties): Lagrangian
  def estimateLambdas(d: LagrangeData, pens: Penalties, old: Lambdas, cons: ConstraintValues): Option[Lambdas]
  def updatePenaltiesHistory(d: LagrangeData,  h: HistoryLagrange): HistoryLagrange
}

The latter case introduces a data class and an extra parameter into each method. (Instead, I could use a Reader monad, which would, however, also require monad transformers.)

Questions:

  1. What is the best refactoring of this algorithm to an FP style?
  2. Could some half-way approach work better: eg, to leave some of the data fields in the trait?
  3. Is there much difference between the two versions?
  4. What is the benefit of refactoring to FP-style in this case?

Note: I agree with many points in the related post Why is "tight coupling between functions and data" bad?. Still I'm not sure how this applies to immutable settings data that is intrinsic to the functions implementing the algorithm.

Functional programming strongly suggests separating data from behaviours (functions). However, I can't see the benefit of this for an algorithm's implementation that is intrinsically tied to particular settings data.

For example, suppose there's a trait LagrangeAlgorithmOOP with immutable data being the algorithmic settings, problem specification and dependencies on helpers. The trait's methods all use this data to find a problem solution. Their implementation is specific to the algorithm's type. Almost none of them would make sense as a stand-alone function.

Specifically, suppose we refactor

 trait LagrangeAlgorithmOOP{

  val settings: SettingsLagrange
  val problem: ConstrainedProblem
  val innerMinimiser: Minimiser
  val penaltiesFunction: ConstraintPenaltiesFunction

  def iteration(s: StateLagrange):  Either[String, StateLagrange]
  def lagrangeFunction(lams: Lambdas, pens: Penalties): AugmentedLagrangianFunction
  def estimateLambdas(pens: Penalties, las: Lambdas, cons: ConstraintValues): Option[Lambdas]
  def updatePenaltiesHistory( h: HistoryLagrange): HistoryLagrange
 
}

into this

trait LagrangeData {
  
  val settings: SettingsLagrange
  val problem: ConstrainedProblem
  val innerMinimiser: Minimiser
  val penaltiesFunction: ConstraintPenaltiesFunction
}

trait LagrangeAlgorithFUN {

  def iteration(d: LagrangeData, s: StateLagrange):  Either[String, StateLagrange]
  def lagrangeFunction(d: LagrangeData, lams: Lambdas, pens: Penalties): Lagrangian
  def estimateLambdas(d: LagrangeData, pens: Penalties, old: Lambdas, cons: ConstraintValues): Option[Lambdas]
  def updatePenaltiesHistory(d: LagrangeData,  h: HistoryLagrange): HistoryLagrange
}

The latter case introduces a data class and an extra parameter into each method. (Instead, I could use a Reader monad, which would, however, also require monad transformers.)

Questions:

  1. What is the best refactoring of this algorithm to an FP style?
  2. Could some half-way approach work better: eg, to leave some of the data fields in the trait?
  3. Is there much difference between the two versions?
  4. What is the benefit of refactoring to FP-style in this case?

Note: I agree with many points in the related post Why is "tight coupling between functions and data" bad?. Still I'm not sure how this applies to immutable settings data that is intrinsic to the functions implementing the algorithm.

Functional programming strongly suggests separating data from behaviours (functions). However, I can't see the benefit of this for an algorithm's implementation that is intrinsically tied to particular settings data.

For example, suppose there's a trait LagrangeAlgorithmOOP with immutable data being the algorithmic settings, problem specification and dependencies on helpers. The trait's methods all use this data to find a problem solution. Their implementation is specific to the algorithm's type. Almost none of them would make sense as a stand-alone function.

Specifically, suppose we refactor

 trait LagrangeAlgorithOOP {

       val settings: SettingsLagrange
       val problem: ConstrainedProblem
       val innerMinimiser: Minimiser
       val penaltiesFunction: ConstraintPenaltiesFunction

       def iteration( s: StateLagrange):  Either[String, StateLagrange]
       def lagrangeFunction( lams: Lambdas, pens: Penalties): AugmentedLagrangianFunction
       def estimateLambdas( pens: Penalties, las: Lambdas, cons: ConstraintValues): Option[Lambdas]
       def updatePenaltiesHistory( h: HistoryLagrange): HistoryLagrange
     }   

into this

 trait LagrangeData {

  val settings: SettingsLagrange
  val problem: ConstrainedProblem
  val innerMinimiser: Minimiser
  val penaltiesFunction: ConstraintPenaltiesFunction
}

trait LagrangeAlgorithmFUN {

  def iteration(d: LagrangeData, s: StateLagrange):  Either[String, StateLagrange]
  def lagrangeFunction(d: LagrangeData, lams: Lambdas, pens: Penalties): Lagrangian
  def estimateLambdas(d: LagrangeData, pens: Penalties, old: Lambdas, cons: ConstraintValues): Option[Lambdas]
  def updatePenaltiesHistory(d: LagrangeData,  h: HistoryLagrange): HistoryLagrange
}

The latter case introduces a data class and an extra parameter into each method. (Instead, I could use a Reader monad, which would, however, also require monad transformers.)

Questions:

  1. What is the best refactoring of this algorithm to an FP style?
  2. Could some half-way approach work better: eg, to leave some of the data fields in the trait?
  3. Is there much difference between the two versions?
  4. What is the benefit of refactoring to FP-style in this case?

Note: I agree with many points in the related post Why is "tight coupling between functions and data" bad?. Still I'm not sure how this applies to immutable settings data that is intrinsic to the functions implementing the algorithm.

Source Link

What's the benefit of separating specialised data from behaviour in an algorithm?

Functional programming strongly suggests separating data from behaviours (functions). However, I can't see the benefit of this for an algorithm's implementation that is intrinsically tied to particular settings data.

For example, suppose there's a trait LagrangeAlgorithmOOP with immutable data being the algorithmic settings, problem specification and dependencies on helpers. The trait's methods all use this data to find a problem solution. Their implementation is specific to the algorithm's type. Almost none of them would make sense as a stand-alone function.

Specifically, suppose we refactor

 /** OOP-style formulation: the immutable configuration (per the question
  * text: algorithmic settings, problem specification and dependencies on
  * helpers) lives as abstract vals in the same trait as the algorithm's
  * operations, so every method reads it implicitly.
  */
 trait LagrangeAlgorithmOOP{

  // Immutable data supplied by the concrete implementation.
  val settings: SettingsLagrange
  val problem: ConstrainedProblem
  val innerMinimiser: Minimiser
  val penaltiesFunction: ConstraintPenaltiesFunction

  // Presumably one outer-loop step; Left (a String) looks like an error
  // description, Right the next state — confirm against implementations.
  def iteration(s: StateLagrange):  Either[String, StateLagrange]
  // Builds the augmented Lagrangian for the given multipliers and penalties.
  def lagrangeFunction(lams: Lambdas, pens: Penalties): AugmentedLagrangianFunction
  // New multiplier estimates; None when no estimate is produced.
  def estimateLambdas(pens: Penalties, las: Lambdas, cons: ConstraintValues): Option[Lambdas]
  // Pure update: returns a new history value rather than mutating state.
  def updatePenaltiesHistory( h: HistoryLagrange): HistoryLagrange

}

into this

/** Pure data holder for the FP refactoring: the same immutable fields the
  * OOP trait mixes in (algorithmic settings, problem specification and
  * helper dependencies), separated out so behaviour can take them explicitly.
  */
trait LagrangeData {
  
  // All fields are immutable; their types are declared elsewhere in the project.
  val settings: SettingsLagrange
  val problem: ConstrainedProblem
  val innerMinimiser: Minimiser
  val penaltiesFunction: ConstraintPenaltiesFunction
}

/** FP-style counterpart: stateless operations that receive the shared
  * immutable data explicitly as `d: LagrangeData` rather than reading
  * mixed-in vals.
  * NOTE(review): name appears to be missing an 'm' — LagrangeAlgorithmFUN?
  */
trait LagrangeAlgorithFUN {

  // Presumably one outer-loop step; Left (a String) looks like an error description.
  def iteration(d: LagrangeData, s: StateLagrange):  Either[String, StateLagrange]
  // NOTE(review): returns Lagrangian here but AugmentedLagrangianFunction in
  // the OOP version — confirm which type is intended.
  def lagrangeFunction(d: LagrangeData, lams: Lambdas, pens: Penalties): Lagrangian
  // New multiplier estimates; None when no estimate is produced.
  def estimateLambdas(d: LagrangeData, pens: Penalties, old: Lambdas, cons: ConstraintValues): Option[Lambdas]
  // Pure update: returns a new history value rather than mutating state.
  def updatePenaltiesHistory(d: LagrangeData,  h: HistoryLagrange): HistoryLagrange
}

The latter case introduces a data class and an extra parameter into each method. (Instead, I could use a Reader monad, which would, however, also require monad transformers.)

Questions:

  1. What is the best refactoring of this algorithm to an FP style?
  2. Could some half-way approach work better: eg, to leave some of the data fields in the trait?
  3. Is there much difference between the two versions?
  4. What is the benefit of refactoring to FP-style in this case?

Note: I agree with many points in the related post Why is "tight coupling between functions and data" bad?. Still I'm not sure how this applies to immutable settings data that is intrinsic to the functions implementing the algorithm.