// IpTNLP.hpp
// Copyright (C) 2004, 2009 International Business Machines and others.
// All Rights Reserved.
// This code is published under the Eclipse Public License.
//
// $Id: IpTNLP.hpp 2212 2013-04-14 14:51:52Z stefan $
//
// Authors: Carl Laird, Andreas Waechter    IBM    2004-08-13
  8. #ifndef __IPTNLP_HPP__
  9. #define __IPTNLP_HPP__
  10. #include "IpUtils.hpp"
  11. #include "IpReferenced.hpp"
  12. #include "IpException.hpp"
  13. #include "IpAlgTypes.hpp"
  14. #include "IpReturnCodes.hpp"
  15. #include <map>
  16. namespace Ipopt
  17. {
  18. // forward declarations
  19. class IpoptData;
  20. class IpoptCalculatedQuantities;
  21. class IteratesVector;
  22. /** Base class for all NLP's that use standard triplet matrix form
  23. * and dense vectors. This is the standard base class for all
  24. * NLP's that use the standard triplet matrix form (as for Harwell
  25. * routines) and dense vectors. The class TNLPAdapter then converts
  26. * this interface to an interface that can be used directly by
  27. * ipopt.
  28. *
  29. * This interface presents the problem form:
  30. *
  31. * min f(x)
  32. *
  33. * s.t. gL <= g(x) <= gU
  34. *
  35. * xL <= x <= xU
  36. *
  37. * In order to specify an equality constraint, set gL_i = gU_i =
  38. * rhs. The value that indicates "infinity" for the bounds
  39. * (i.e. the variable or constraint has no lower bound (-infinity)
  40. * or upper bound (+infinity)) is set through the option
  41. * nlp_lower_bound_inf and nlp_upper_bound_inf. To indicate that a
  42. * variable has no upper or lower bound, set the bound to
  43. * -ipopt_inf or +ipopt_inf respectively
  44. */
  45. class TNLP : public ReferencedObject
  46. {
  47. public:
  48. /** Type of the constraints*/
  49. enum LinearityType
  50. {
  51. LINEAR/** Constraint/Variable is linear.*/,
  52. NON_LINEAR/**Constraint/Varaible is non-linear.*/
  53. };
  54. /**@name Constructors/Destructors */
  55. //@{
  56. TNLP()
  57. {}
  58. /** Default destructor */
  59. virtual ~TNLP()
  60. {}
  61. //@}
  62. DECLARE_STD_EXCEPTION(INVALID_TNLP);
  63. /**@name methods to gather information about the NLP */
  64. //@{
  65. /** overload this method to return the number of variables
  66. * and constraints, and the number of non-zeros in the jacobian and
  67. * the hessian. The index_style parameter lets you specify C or Fortran
  68. * style indexing for the sparse matrix iRow and jCol parameters.
  69. * C_STYLE is 0-based, and FORTRAN_STYLE is 1-based.
  70. */
  71. enum IndexStyleEnum { C_STYLE=0, FORTRAN_STYLE=1 };
  72. virtual bool get_nlp_info(Index& n, Index& m, Index& nnz_jac_g,
  73. Index& nnz_h_lag, IndexStyleEnum& index_style)=0;
  74. typedef std::map<std::string, std::vector<std::string> > StringMetaDataMapType;
  75. typedef std::map<std::string, std::vector<Index> > IntegerMetaDataMapType;
  76. typedef std::map<std::string, std::vector<Number> > NumericMetaDataMapType;
  77. /** overload this method to return any meta data for
  78. * the variables and the constraints */
  79. virtual bool get_var_con_metadata(Index n,
  80. StringMetaDataMapType& var_string_md,
  81. IntegerMetaDataMapType& var_integer_md,
  82. NumericMetaDataMapType& var_numeric_md,
  83. Index m,
  84. StringMetaDataMapType& con_string_md,
  85. IntegerMetaDataMapType& con_integer_md,
  86. NumericMetaDataMapType& con_numeric_md)
  87. {
  88. return false;
  89. }
  90. /** overload this method to return the information about the bound
  91. * on the variables and constraints. The value that indicates
  92. * that a bound does not exist is specified in the parameters
  93. * nlp_lower_bound_inf and nlp_upper_bound_inf. By default,
  94. * nlp_lower_bound_inf is -1e19 and nlp_upper_bound_inf is
  95. * 1e19. (see TNLPAdapter) */
  96. virtual bool get_bounds_info(Index n, Number* x_l, Number* x_u,
  97. Index m, Number* g_l, Number* g_u)=0;
  98. /** overload this method to return scaling parameters. This is
  99. * only called if the options are set to retrieve user scaling.
  100. * There, use_x_scaling (or use_g_scaling) should get set to true
  101. * only if the variables (or constraints) are to be scaled. This
  102. * method should return true only if the scaling parameters could
  103. * be provided.
  104. */
  105. virtual bool get_scaling_parameters(Number& obj_scaling,
  106. bool& use_x_scaling, Index n,
  107. Number* x_scaling,
  108. bool& use_g_scaling, Index m,
  109. Number* g_scaling)
  110. {
  111. return false;
  112. }
  113. /** overload this method to return the variables linearity
  114. * (TNLP::LINEAR or TNLP::NON_LINEAR). The var_types
  115. * array has been allocated with length at least n. (default implementation
  116. * just return false and does not fill the array).*/
  117. virtual bool get_variables_linearity(Index n, LinearityType* var_types)
  118. {
  119. return false;
  120. }
  121. /** overload this method to return the constraint linearity.
  122. * array has been allocated with length at least n. (default implementation
  123. * just return false and does not fill the array).*/
  124. virtual bool get_constraints_linearity(Index m, LinearityType* const_types)
  125. {
  126. return false;
  127. }
  128. /** overload this method to return the starting point. The bool
  129. * variables indicate whether the algorithm wants you to
  130. * initialize x, z_L/z_u, and lambda, respectively. If, for some
  131. * reason, the algorithm wants you to initialize these and you
  132. * cannot, return false, which will cause Ipopt to stop. You
  133. * will have to run Ipopt with different options then.
  134. */
  135. virtual bool get_starting_point(Index n, bool init_x, Number* x,
  136. bool init_z, Number* z_L, Number* z_U,
  137. Index m, bool init_lambda,
  138. Number* lambda)=0;
  139. /** overload this method to provide an Ipopt iterate (already in
  140. * the form Ipopt requires it internally) for a warm start.
  141. * Since this is only for expert users, a default dummy
  142. * implementation is provided and returns false. */
  143. virtual bool get_warm_start_iterate(IteratesVector& warm_start_iterate)
  144. {
  145. return false;
  146. }
  147. /** overload this method to return the value of the objective function */
  148. virtual bool eval_f(Index n, const Number* x, bool new_x,
  149. Number& obj_value)=0;
  150. /** overload this method to return the vector of the gradient of
  151. * the objective w.r.t. x */
  152. virtual bool eval_grad_f(Index n, const Number* x, bool new_x,
  153. Number* grad_f)=0;
  154. /** overload this method to return the vector of constraint values */
  155. virtual bool eval_g(Index n, const Number* x, bool new_x,
  156. Index m, Number* g)=0;
  157. /** overload this method to return the jacobian of the
  158. * constraints. The vectors iRow and jCol only need to be set
  159. * once. The first call is used to set the structure only (iRow
  160. * and jCol will be non-NULL, and values will be NULL) For
  161. * subsequent calls, iRow and jCol will be NULL. */
  162. virtual bool eval_jac_g(Index n, const Number* x, bool new_x,
  163. Index m, Index nele_jac, Index* iRow,
  164. Index *jCol, Number* values)=0;
  165. /** overload this method to return the hessian of the
  166. * lagrangian. The vectors iRow and jCol only need to be set once
  167. * (during the first call). The first call is used to set the
  168. * structure only (iRow and jCol will be non-NULL, and values
  169. * will be NULL) For subsequent calls, iRow and jCol will be
  170. * NULL. This matrix is symmetric - specify the lower diagonal
  171. * only. A default implementation is provided, in case the user
  172. * wants to se quasi-Newton approximations to estimate the second
  173. * derivatives and doesn't not neet to implement this method. */
  174. virtual bool eval_h(Index n, const Number* x, bool new_x,
  175. Number obj_factor, Index m, const Number* lambda,
  176. bool new_lambda, Index nele_hess,
  177. Index* iRow, Index* jCol, Number* values)
  178. {
  179. return false;
  180. }
  181. //@}
  182. /** @name Solution Methods */
  183. //@{
  184. /** This method is called when the algorithm is complete so the TNLP can store/write the solution */
  185. virtual void finalize_solution(SolverReturn status,
  186. Index n, const Number* x, const Number* z_L, const Number* z_U,
  187. Index m, const Number* g, const Number* lambda,
  188. Number obj_value,
  189. const IpoptData* ip_data,
  190. IpoptCalculatedQuantities* ip_cq)=0;
  191. /** This method is called just before finalize_solution. With
  192. * this method, the algorithm returns any metadata collected
  193. * during its run, including the metadata provided by the user
  194. * with the above get_var_con_metadata. Each metadata can be of
  195. * type string, integer, and numeric. It can be associated to
  196. * either the variables or the constraints. The metadata that
  197. * was associated with the primal variable vector is stored in
  198. * var_..._md. The metadata associated with the constraint
  199. * multipliers is stored in con_..._md. The metadata associated
  200. * with the bound multipliers is stored in var_..._md, with the
  201. * suffixes "_z_L", and "_z_U", denoting lower and upper
  202. * bounds. */
  203. virtual void finalize_metadata(Index n,
  204. const StringMetaDataMapType& var_string_md,
  205. const IntegerMetaDataMapType& var_integer_md,
  206. const NumericMetaDataMapType& var_numeric_md,
  207. Index m,
  208. const StringMetaDataMapType& con_string_md,
  209. const IntegerMetaDataMapType& con_integer_md,
  210. const NumericMetaDataMapType& con_numeric_md)
  211. {}
  212. /** Intermediate Callback method for the user. Providing dummy
  213. * default implementation. For details see IntermediateCallBack
  214. * in IpNLP.hpp. */
  215. virtual bool intermediate_callback(AlgorithmMode mode,
  216. Index iter, Number obj_value,
  217. Number inf_pr, Number inf_du,
  218. Number mu, Number d_norm,
  219. Number regularization_size,
  220. Number alpha_du, Number alpha_pr,
  221. Index ls_trials,
  222. const IpoptData* ip_data,
  223. IpoptCalculatedQuantities* ip_cq)
  224. {
  225. return true;
  226. }
  227. //@}
  228. /** @name Methods for quasi-Newton approximation. If the second
  229. * derivatives are approximated by Ipopt, it is better to do this
  230. * only in the space of nonlinear variables. The following
  231. * methods are call by Ipopt if the quasi-Newton approximation is
  232. * selected. If -1 is returned as number of nonlinear variables,
  233. * Ipopt assumes that all variables are nonlinear. Otherwise, it
  234. * calls get_list_of_nonlinear_variables with an array into which
  235. * the indices of the nonlinear variables should be written - the
  236. * array has the lengths num_nonlin_vars, which is identical with
  237. * the return value of get_number_of_nonlinear_variables(). It
  238. * is assumed that the indices are counted starting with 1 in the
  239. * FORTRAN_STYLE, and 0 for the C_STYLE. */
  240. //@{
  241. virtual Index get_number_of_nonlinear_variables()
  242. {
  243. return -1;
  244. }
  245. virtual bool get_list_of_nonlinear_variables(Index num_nonlin_vars,
  246. Index* pos_nonlin_vars)
  247. {
  248. return false;
  249. }
  250. //@}
  251. private:
  252. /**@name Default Compiler Generated Methods
  253. * (Hidden to avoid implicit creation/calling).
  254. * These methods are not implemented and
  255. * we do not want the compiler to implement
  256. * them for us, so we declare them private
  257. * and do not define them. This ensures that
  258. * they will not be implicitly created/called. */
  259. //@{
  260. /** Default Constructor */
  261. //TNLP();
  262. /** Copy Constructor */
  263. TNLP(const TNLP&);
  264. /** Overloaded Equals Operator */
  265. void operator=(const TNLP&);
  266. //@}
  267. };
  268. } // namespace Ipopt
  269. #endif