CoolProp 7.2.0
An open-source fluid property and humid air property database
superancillary.h
1/*
2# NIST Disclaimer of Copyright and Warranty
3
4This software was developed by employees of the National Institute of
5Standards and Technology (NIST), an agency of the Federal Government
6and is being made available as a public service. Pursuant to title 17
7United States Code Section 105, works of NIST employees are not
8subject to copyright protection in the United States. This software
9may be subject to foreign copyright. Permission in the United States
10and in foreign countries, to the extent that NIST may hold copyright,
11to use, copy, modify, create derivative works, and distribute this
12software and its documentation without fee is hereby granted on a
13non-exclusive basis, provided that this notice and disclaimer of
14warranty appears in all copies.
15
16THE SOFTWARE IS PROVIDED 'AS IS' WITHOUT ANY WARRANTY OF ANY KIND,
17EITHER EXPRESSED, IMPLIED, OR STATUTORY, INCLUDING, BUT NOT LIMITED
18TO, ANY WARRANTY THAT THE SOFTWARE WILL CONFORM TO SPECIFICATIONS,
19ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
20PURPOSE, AND FREEDOM FROM INFRINGEMENT, AND ANY WARRANTY THAT THE
21DOCUMENTATION WILL CONFORM TO THE SOFTWARE, OR ANY WARRANTY THAT THE
22SOFTWARE WILL BE ERROR FREE. IN NO EVENT SHALL NIST BE LIABLE FOR ANY
23DAMAGES, INCLUDING, BUT NOT LIMITED TO, DIRECT, INDIRECT, SPECIAL OR
24CONSEQUENTIAL DAMAGES, ARISING OUT OF, RESULTING FROM, OR IN ANY WAY
25CONNECTED WITH THIS SOFTWARE, WHETHER OR NOT BASED UPON WARRANTY,
26CONTRACT, TORT, OR OTHERWISE, WHETHER OR NOT INJURY WAS SUSTAINED BY
27PERSONS OR PROPERTY OR OTHERWISE, AND WHETHER OR NOT LOSS WAS
28SUSTAINED FROM, OR AROSE OUT OF THE RESULTS OF, OR USE OF, THE
29SOFTWARE OR SERVICES PROVIDED HEREUNDER.
30
31Subsequent edits by Ian Bell
32*/
33
34#pragma once
35
36#include <iostream>
37#include <utility>
38#include <optional>
#include <algorithm>
#include <cmath>
#include <functional>
#include <iomanip>
#include <limits>
#include <set>
#include <sstream>
#include <string>
#include <tuple>
#include <vector>
39#include <Eigen/Dense>
40
41#include "boost/math/tools/toms748_solve.hpp"
42#include "nlohmann/json.hpp"
43
44namespace CoolProp {
45namespace superancillary {
46
47namespace detail {
48
49// From https://arxiv.org/pdf/1401.5766.pdf (Algorithm #3)
50template <typename Matrix>
51inline void balance_matrix(const Matrix& A, Matrix& Aprime, Matrix& D) {
52 const int p = 2;
53 double beta = 2; // Radix base (2?)
54 int iter = 0;
55 Aprime = A;
56 D = Matrix::Identity(A.rows(), A.cols());
57 bool converged = false;
58 do {
59 converged = true;
60 for (Eigen::Index i = 0; i < A.rows(); ++i) {
61 double c = Aprime.col(i).template lpNorm<p>();
62 double r = Aprime.row(i).template lpNorm<p>();
63 double s = pow(c, p) + pow(r, p);
64 double f = 1;
65 //if (!ValidNumber(c)){
66 // std::cout << A << std::endl;
67 // throw std::range_error("c is not a valid number in balance_matrix"); }
68 //if (!ValidNumber(r)) { throw std::range_error("r is not a valid number in balance_matrix"); }
69 while (c < r / beta) {
70 c *= beta;
71 r /= beta;
72 f *= beta;
73 }
74 while (c >= r * beta) {
75 c /= beta;
76 r *= beta;
77 f /= beta;
78 }
79 if (pow(c, p) + pow(r, p) < 0.95 * s) {
80 converged = false;
81 D(i, i) *= f;
82 Aprime.col(i) *= f;
83 Aprime.row(i) /= f;
84 }
85 }
86 iter++;
87 if (iter > 50) {
88 break;
89 }
90 } while (!converged);
91}
92
93inline void companion_matrix_transposed(const Eigen::ArrayXd& coeffs, Eigen::MatrixXd& A) {
94 auto N = coeffs.size() - 1; // degree
95 if (A.rows() != N) {
96 throw std::invalid_argument("A.rows() != N");
97 }
98 A.setZero();
99 // First col
100 A(1, 0) = 1;
101 // Last col
102 A.col(N - 1) = -coeffs.head(N) / (2.0 * coeffs(N));
103 A(N - 2, N - 1) += 0.5;
104 // All the other cols
105 for (int j = 1; j < N - 1; ++j) {
106 A(j - 1, j) = 0.5;
107 A(j + 1, j) = 0.5;
108 }
109}
110inline void companion_matrix_transposed(const std::vector<double>& coeffs, Eigen::MatrixXd& A) {
111 Eigen::ArrayXd coeffs_ = Eigen::Map<const Eigen::ArrayXd>(&coeffs[0], coeffs.size());
112 return companion_matrix_transposed(coeffs_, A);
113}
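/* Illustrative note: these two helpers implement the eigenvalue-based rootfinding used by
   ChebyshevApproximation1D::determine_extrema further below. The Chebyshev coefficients are
   placed into the transposed "colleague" (companion) matrix, the matrix is balanced for better
   numerical conditioning (a diagonal similarity transform, so the eigenvalues are unchanged),
   and the real eigenvalues lying in [-1, 1] are the roots of the expansion. A sketch for
   T_2(x) = 2x^2 - 1, whose roots are +/- 1/sqrt(2):

       Eigen::ArrayXd c(3);
       c << 0.0, 0.0, 1.0;                         // coefficients of T_2(x)
       Eigen::MatrixXd A(c.size() - 1, c.size() - 1), Aprime, D;
       detail::companion_matrix_transposed(c, A);  // A = [[0, 0.5], [1, 0]]
       detail::balance_matrix(A, Aprime, D);
       auto roots = Aprime.eigenvalues();          // approximately (+0.70711, -0.70711)
*/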
114
123inline auto get_LU_matrices(std::size_t N) {
124 Eigen::MatrixXd L(N + 1, N + 1);
125 Eigen::MatrixXd U(N + 1, N + 1);
126 for (std::size_t j = 0; j <= N; ++j) {
127 for (std::size_t k = j; k <= N; ++k) {
128 double p_j = (j == 0 || j == N) ? 2 : 1;
129 double p_k = (k == 0 || k == N) ? 2 : 1;
130 double cosjPikN = cos((j * EIGEN_PI * k) / N);
131 L(j, k) = 2.0 / (p_j * p_k * N) * cosjPikN;
132 // Exploit symmetry to fill in the symmetric elements in the matrix
133 L(k, j) = L(j, k);
134
135 U(j, k) = cosjPikN;
136 // Exploit symmetry to fill in the symmetric elements in the matrix
137 U(k, j) = U(j, k);
138 }
139 }
140 return std::make_tuple(L, U);
141}
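/* Illustrative note: U maps a vector of Chebyshev coefficients to function values at the N+1
   Chebyshev-Lobatto nodes cos(j*pi/N), and L is its inverse, mapping nodal values back to
   coefficients. This pair is the discrete Chebyshev transform used by dyadic_splitting below
   and by SuperAncillary::add_variable. A round trip looks like:

       auto [Lmat, Umat] = detail::get_LU_matrices(8);
       Eigen::VectorXd coeff = Eigen::VectorXd::Random(9);  // any 9 expansion coefficients
       Eigen::VectorXd vals = Umat * coeff;                 // values at the 9 nodes
       Eigen::VectorXd back = Lmat * vals;                  // recovers coeff up to round-off
*/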
142
143inline double M_element_norm(const std::vector<double>& x, Eigen::Index M) {
144 Eigen::Map<const Eigen::ArrayXd> X(&x[0], x.size());
145 return X.tail(M).matrix().norm() / X.head(M).matrix().norm();
146}
147
148inline double M_element_norm(const Eigen::ArrayXd& x, Eigen::Index M) {
149 return x.tail(M).matrix().norm() / x.head(M).matrix().norm();
150}
151
156template <typename Function, typename Container>
157inline auto dyadic_splitting(const std::size_t N, const Function& func, const double xmin, const double xmax, const int M = 3,
158 const double tol = 1e-12, const int max_refine_passes = 8,
159 const std::optional<std::function<void(int, const Container&)>>& callback = std::nullopt) -> Container {
160 using CE_t = std::decay_t<decltype(Container().front())>;
161 using ArrayType = std::decay_t<decltype(CE_t().coeff())>;
162
163 Eigen::MatrixXd Lmat, Umat;
164 std::tie(Lmat, Umat) = detail::get_LU_matrices(N);
165
166 auto builder = [&](double xmin, double xmax) -> CE_t {
167 auto get_nodes_realworld = [N, xmin, xmax]() -> Eigen::ArrayXd {
168 Eigen::ArrayXd nodes = (Eigen::ArrayXd::LinSpaced(N + 1, 0, static_cast<double>(N)).array() * EIGEN_PI / N).cos();
169 return ((xmax - xmin) * nodes + (xmax + xmin)) * 0.5;
170 };
171
172 Eigen::ArrayXd x = get_nodes_realworld();
173
174 // Apply the function to do the transformation
175 // of the functional values at the nodes
176 for (auto j = 0L; j < x.size(); ++j) {
177 x(j) = func(j, static_cast<long>(x.size()), x(j));
178 }
179
180 // And now rebuild the expansion by left-multiplying by the L matrix
181 Eigen::ArrayXd c = Lmat * x.matrix();
182 // std::cout << "c: " << c << std::endl;
183 // Check if any coefficients are invalid, stop if so
184 if (!c.allFinite()) {
185 throw std::invalid_argument("At least one coefficient is non-finite");
186 }
187 if constexpr (std::is_same_v<ArrayType, std::vector<double>>) {
188 return {xmin, xmax, std::vector<double>(&c[0], &c[0] + c.size())};
189 } else {
190 // Probably an Eigen::ArrayXd, just pass that into constructor
191 return {xmin, xmax, c};
192 }
193 };
194
195 // Start off with the full domain from xmin to xmax
196 Container expansions;
197 expansions.emplace_back(builder(xmin, xmax));
198
199 // Now enter into refinement passes
200 for (int refine_pass = 0; refine_pass < max_refine_passes; ++refine_pass) {
201 bool all_converged = true;
202 // Start at the right and move left because insertions will make the length increase
203 for (int iexpansion = static_cast<int>(expansions.size()) - 1; iexpansion >= 0; --iexpansion) {
204 auto& expan = expansions[iexpansion];
205 auto err = M_element_norm(expan.coeff(), M);
206 // std::cout << "err: " << err << std::endl;
207 if (err > tol) {
208 // Splitting is required, do a dyadic split
209 auto xmid = (expan.xmin() + expan.xmax()) / 2;
210 CE_t newleft{builder(expan.xmin(), xmid)};
211 CE_t newright{builder(xmid, expan.xmax())};
212
213 expansions.at(iexpansion) = std::move(newleft);
214 expansions.insert(expansions.begin() + iexpansion + 1, newright);
215 all_converged = false;
216 }
217 // std::cout << expansions.size() << std::endl;
218 }
219 if (callback) {
220 callback.value()(refine_pass, expansions);
221 }
222 if (all_converged) {
223 break;
224 }
225 }
226 return expansions;
227}
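/* Usage sketch (illustrative; the function and parameters are chosen only for this example).
   dyadic_splitting builds a piecewise Chebyshev approximation: a degree-N expansion is fit on
   [xmin, xmax], and whenever the norm of the last M coefficients relative to the first M exceeds
   tol, the interval is split in half and each half is refit, for up to max_refine_passes passes.
   The callable receives (node index, number of nodes, x) and returns the value to approximate,
   mirroring how make_invlnp further below uses this routine (ChebyshevExpansion is defined later
   in this header):

       using CE = ChebyshevExpansion<Eigen::ArrayXd>;
       auto f = [](long inode, long npts, double x) { return std::exp(x); };  // inode/npts unused
       auto pieces = detail::dyadic_splitting<decltype(f), std::vector<CE>>(
           12,       // degree of each expansion
           f, 0.0, 2.0,
           3,        // M: number of trailing coefficients in the convergence check
           1e-12,    // tol on the trailing-to-leading coefficient norm ratio
           8);       // maximum number of refinement passes
       double y = pieces.front().eval(0.5);  // ~exp(0.5)
*/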
228
229} // namespace detail
230
241template <typename ArrayType>
242class ChebyshevExpansion
243{
244 private:
245 double m_xmin,
246 m_xmax;
247 ArrayType m_coeff;
248 public:
250
252 ChebyshevExpansion(double xmin, double xmax, const ArrayType& coeff) : m_xmin(xmin), m_xmax(xmax), m_coeff(coeff) {};
253
258
260 const auto xmin() const {
261 return m_xmin;
262 }
263
265 const auto xmax() const {
266 return m_xmax;
267 }
268
270 const auto& coeff() const {
271 return m_coeff;
272 }
273
277 template <typename T>
278 auto eval(const T& x) const {
279 // Scale to (-1, 1)
280 T xscaled = (2.0 * x - (m_xmax + m_xmin)) / (m_xmax - m_xmin);
281 int Norder = static_cast<int>(m_coeff.size()) - 1;
282 T u_k = 0, u_kp1 = m_coeff[Norder], u_kp2 = 0;
283 for (int k = Norder - 1; k > 0; k--) { // k must be signed!
284 // Do the recurrent calculation
285 u_k = 2.0 * xscaled * u_kp1 - u_kp2 + m_coeff[k];
286 // Update the values
287 u_kp2 = u_kp1;
288 u_kp1 = u_k;
289 }
290 T retval = m_coeff[0] + xscaled * u_kp1
291 - u_kp2;  // This looks inefficient, but it is needed so that Eigen::Array arguments are fully evaluated and an unevaluated expression is not returned
292 return retval;
293 }
294
296 template <typename T>
297 auto eval_many(const T& x, T& y) const {
298 if (x.size() != y.size()) {
299 throw std::invalid_argument("x and y are not the same size");
300 }
301 for (auto i = 0; i < x.size(); ++i) {
302 y(i) = eval(x(i));
303 }
304 }
305
307 template <typename T>
308 auto eval_manyC(const T x[], T y[], std::size_t N) const {
309 for (std::size_t i = 0; i < N; ++i) {
310 y[i] = eval(x[i]);
311 }
312 }
313
315 template <typename T>
316 auto eval_Eigen(const T& x, T& y) const {
317 if (x.size() != y.size()) {
318 throw std::invalid_argument("x and y are not the same size");
319 }
320 y = eval(x);
321 }
322
324 Eigen::ArrayXd get_nodes_realworld() const {
325 Eigen::Index N = m_coeff.size() - 1;
326 Eigen::ArrayXd nodes = (Eigen::ArrayXd::LinSpaced(N + 1, 0, N).array() * EIGEN_PI / N).cos();
327 return ((m_xmax - m_xmin) * nodes + (m_xmax + m_xmin)) * 0.5;
328 }
329
343 auto solve_for_x_count(double y, double a, double b, unsigned int bits, std::size_t max_iter, double boundsytol) const {
344 using namespace boost::math::tools;
345 std::size_t counter = 0;
346 auto f = [&](double x) {
347 counter++;
348 return eval(x) - y;
349 };
350 double fa = f(a);
351 if (std::abs(fa) < boundsytol) {
352 return std::make_tuple(a, std::size_t{1});
353 }
354 double fb = f(b);
355 if (std::abs(fb) < boundsytol) {
356 return std::make_tuple(b, std::size_t{2});
357 }
358 boost::math::uintmax_t max_iter_ = static_cast<boost::math::uintmax_t>(max_iter);
359 auto [l, r] = toms748_solve(f, a, b, fa, fb, eps_tolerance<double>(bits), max_iter_);
360 return std::make_tuple((r + l) / 2.0, counter);
361 }
362
365 auto solve_for_x(double y, double a, double b, unsigned int bits, std::size_t max_iter, double boundsytol) const {
366 return std::get<0>(solve_for_x_count(y, a, b, bits, max_iter, boundsytol));
367 }
368
370 template <typename T, typename CountsT>
371 auto solve_for_x_many(const T& y, double a, double b, unsigned int bits, std::size_t max_iter, double boundsytol, T& x, CountsT& counts) const {
372 if (x.size() != y.size()) {
373 throw std::invalid_argument("x and y are not the same size");
374 }
375 for (auto i = 0; i < x.size(); ++i) {
376 std::tie(x(i), counts(i)) = solve_for_x_count(y(i), a, b, bits, max_iter, boundsytol);
377 }
378 }
379
381 template <typename T, typename CountsT>
382 auto solve_for_x_manyC(const T y[], std::size_t N, double a, double b, unsigned int bits, std::size_t max_iter, double boundsytol, T x[],
383 CountsT counts[]) const {
384 for (std::size_t i = 0; i < N; ++i) {
385 std::tie(x[i], counts[i]) = solve_for_x_count(y[i], a, b, bits, max_iter, boundsytol);
386 }
387 }
388
389 ArrayType do_derivs(std::size_t Nderiv) const {
390 // See Mason and Handscomb, p. 34, Eq. 2.52
391 // and the example in https://github.com/numpy/numpy/blob/master/numpy/polynomial/chebyshev.py#L868-L964
392 ArrayType c = m_coeff;
393 for (std::size_t deriv_counter = 0; deriv_counter < Nderiv; ++deriv_counter) {
394 std::size_t N = c.size() - 1,
395 Nd = N - 1;
396 ArrayType cd(N);
397 for (std::size_t r = 0; r <= Nd; ++r) {
398 cd[r] = 0;
399 for (std::size_t k = r + 1; k <= N; ++k) {
400 // Terms where k-r is odd have values, otherwise, they are zero
401 if ((k - r) % 2 == 1) {
402 cd[r] += 2 * k * c[k];
403 }
404 }
405 // The first term with r = 0 is divided by 2 (the single prime in Mason and Handscomb, p. 34, Eq. 2.52)
406 if (r == 0) {
407 cd[r] /= 2;
408 }
409 // Rescale the values if the range is not [-1,1]. Arrives from the derivative of d(xreal)/d(x_{-1,1})
410 cd[r] /= (m_xmax - m_xmin) / 2.0;
411 }
412 if (Nderiv == 1) {
413 return cd;
414 } else {
415 c = cd;
416 }
417 }
418 return c;
419 }
420};
421
422static_assert(std::is_move_assignable_v<ChebyshevExpansion<std::vector<double>>>);
423//static_assert(std::is_copy_assignable_v<ChebyshevExpansion<std::vector<double>>>);
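/* Usage sketch (illustrative; the coefficients are chosen only for this example):
   a ChebyshevExpansion stores Chebyshev coefficients on [xmin, xmax] and evaluates them with
   Clenshaw's recurrence. The degree-1 expansion on [0, 2] with coefficients {1, 1} represents
   f(x) = 1 + T_1(xs) = x, where xs = (2x - (xmax + xmin)) / (xmax - xmin):

       ChebyshevExpansion<std::vector<double>> ce(0.0, 2.0, {1.0, 1.0});
       double y = ce.eval(1.5);                                    // 1.5
       double x = ce.solve_for_x(1.25, 0.0, 2.0, 64, 100, 1e-14);  // ~1.25, via bracketed TOMS748
       auto cd = ce.do_derivs(1);                                  // {1.0}: df/dx = 1 in real-world units
*/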
424
426struct MonotonicExpansionMatch
427{
428 std::size_t idx;
429 double ymin,
430 ymax,
431 xmin,
432 xmax;
434 bool contains_y(double y) const {
435 return y >= ymin && y <= ymax;
436 }
437};
438
440struct IntervalMatch
441{
442 std::vector<MonotonicExpansionMatch> expansioninfo;
443 double xmin,
444 xmax,
445 ymin,
446 ymax;
448 bool contains_y(double y) const {
449 return y >= ymin && y <= ymax;
450 }
451};
452
462template <typename ArrayType = Eigen::ArrayXd>
463class ChebyshevApproximation1D
464{
465 private:
466 const double thresh_imag = 1e-15;
467 std::vector<ChebyshevExpansion<ArrayType>> m_expansions;
468 std::vector<double> m_x_at_extrema;
469 std::vector<IntervalMatch> m_monotonic_intervals;
470
471 Eigen::ArrayXd head(const Eigen::ArrayXd& c, Eigen::Index N) const {
472 return c.head(N);
473 }
474 std::vector<double> head(const std::vector<double>& c, Eigen::Index N) const {
475 return std::vector<double>(c.begin(), c.begin() + N);
476 }
477
482 std::vector<double> determine_extrema(const std::decay_t<decltype(m_expansions)>& expansions, double thresh_im) const {
483 std::vector<double> x_at_extrema;
484 Eigen::MatrixXd companion_matrix, cprime, D;
485 for (auto& expan : expansions) {
486 // Get the coefficients of the derivative's expansion (for x in [-1, 1])
487 auto cd = expan.do_derivs(1);
488 // First, right-trim the coefficients that are equal to zero
489 int ilastnonzero = static_cast<int>(cd.size()) - 1;
490 for (int i = static_cast<int>(cd.size()) - 1; i >= 0; --i) {
491 if (std::abs(cd[i]) != 0) {
492 ilastnonzero = i;
493 break;
494 }
495 }
496 if (ilastnonzero != static_cast<int>(cd.size() - 1)) {
497 cd = head(cd, ilastnonzero + 1);  // keep coefficients 0..ilastnonzero
498 }
499 // Then do eigenvalue rootfinding after balancing
500 // Define working buffers here to avoid allocations all over the place
501 if (companion_matrix.rows() != static_cast<int>(cd.size() - 1)) {
502 companion_matrix.resize(cd.size() - 1, cd.size() - 1);
503 companion_matrix.setZero();
504 D.resizeLike(companion_matrix);
505 D.setZero();
506 cprime.resizeLike(companion_matrix);
507 cprime.setZero();
508 }
509 detail::companion_matrix_transposed(cd, companion_matrix);
510 cprime = companion_matrix;
511
512 detail::balance_matrix(companion_matrix, cprime, D);
513 for (auto& root : cprime.eigenvalues()) {
514 // re_n11 is in [-1,1], need to rescale back to real units
515 auto re_n11 = root.real(), im = root.imag();
516 if (std::abs(im) < thresh_im) {
517 if (re_n11 >= -1 && re_n11 <= 1) {
518 x_at_extrema.push_back(((expan.xmax() - expan.xmin()) * re_n11 + (expan.xmax() + expan.xmin())) / 2.0);
519 }
520 }
521 }
522 }
523 std::sort(x_at_extrema.begin(), x_at_extrema.end());
524 return x_at_extrema;
525 }
526
531 std::vector<IntervalMatch> build_monotonic_intervals(const std::vector<double>& x_at_extrema) const {
532 std::vector<IntervalMatch> intervals;
533
534 auto sort = [](double& x, double& y) {
535 if (x > y) {
536 std::swap(x, y);
537 }
538 };
539
540 if (false) {  // disabled branch; originally: if (x_at_extrema.empty()) {
541 auto xmin = m_expansions.front().xmin();
542 auto xmax = m_expansions.back().xmax();
543 auto ymin = eval(xmin), ymax = eval(xmax);
544 sort(ymin, ymax);
545 MonotonicExpansionMatch mem;
546 mem.idx = 0;
547 mem.xmin = xmin;
548 mem.xmax = xmax;
549 mem.ymin = ymin;
550 mem.ymax = ymax;
551 IntervalMatch im;
552 im.expansioninfo = {mem};
553 im.xmin = mem.xmin;
554 im.xmax = mem.xmax;
555 im.ymin = mem.ymin;
556 im.ymax = mem.ymax;
557 intervals.push_back(im);
558 } else {
559 auto newx = x_at_extrema;
560 newx.insert(newx.begin(), m_expansions.front().xmin());
561 newx.insert(newx.end(), m_expansions.back().xmax());
562
564 auto interval_intersection = [](const auto& t1, const auto& t2) {
565 auto a = std::max(t1.xmin(), t2.xmin);
566 auto b = std::min(t1.xmax(), t2.xmax);
567 return std::make_tuple(a, b);
568 };
569
570 for (auto j = 0; j < static_cast<int>(newx.size() - 1); ++j) {
571 double xmin = newx[j], xmax = newx[j + 1];
572 IntervalMatch im;
573 // Loop over the expansions that contain one of the values of x
574 // that intersect the interval defined by [xmin, xmax]
575 for (auto i = 0UL; i < m_expansions.size(); ++i) {
576 struct A
577 {
578 double xmin, xmax;
579 };
580 auto [a, b] = interval_intersection(m_expansions[i], A{xmin, xmax});
581 if (a < b) {
582 const auto& e = m_expansions[i];
583 MonotonicExpansionMatch mem;
584 mem.idx = i;
585 mem.xmin = a;
586 mem.xmax = b;
587 double ymin = e.eval(a), ymax = e.eval(b);
588 sort(ymin, ymax);
589 mem.ymin = ymin;
590 mem.ymax = ymax;
591 im.expansioninfo.push_back(mem);
592 }
593 }
594 // These are the limits for the entire interval
595 im.xmin = xmin;
596 im.xmax = xmax;
597 double yminoverall = eval(xmin), ymaxoverall = eval(xmax);
598 sort(yminoverall, ymaxoverall);
599 im.ymin = yminoverall;
600 im.ymax = ymaxoverall;
601 intervals.push_back(im);
602 }
603 }
604 return intervals;
605 }
606 double m_xmin,
607 m_xmax;
608
609 public:
610 // Move constructor given a vector of expansions
611 ChebyshevApproximation1D(std::vector<ChebyshevExpansion<ArrayType>>&& expansions)
612 : m_expansions(std::move(expansions)),
613 m_x_at_extrema(determine_extrema(m_expansions, thresh_imag)),
614 m_monotonic_intervals(build_monotonic_intervals(m_x_at_extrema)),
615 m_xmin(get_expansions().front().xmin()),
616 m_xmax(get_expansions().back().xmax()) {}
617
618 ChebyshevApproximation1D(const ChebyshevApproximation1D& other)
619 : m_expansions(other.m_expansions),
620 m_x_at_extrema(other.m_x_at_extrema),
621 m_monotonic_intervals(other.m_monotonic_intervals),
622 m_xmin(other.xmin()),
623 m_xmax(other.xmax()) {}
624
626 // ChebyshevApproximation1D& operator=(const ChebyshevApproximation1D& other) = default;
627 ChebyshevApproximation1D& operator=(ChebyshevApproximation1D other) {
628 std::swap(m_expansions, other.m_expansions);
629 std::swap(m_x_at_extrema, other.m_x_at_extrema);
630 std::swap(m_monotonic_intervals, other.m_monotonic_intervals);
631 std::swap(m_xmin, other.m_xmin);
632 std::swap(m_xmax, other.m_xmax);
633 return *this;
634 }
635
637 const auto& get_expansions() const {
638 return m_expansions;
639 }
640
642 const auto& get_x_at_extrema() const {
643 return m_x_at_extrema;
644 }
645
647 const auto& get_monotonic_intervals() const {
648 return m_monotonic_intervals;
649 }
650
652 const auto xmin() const {
653 return m_xmin;
654 }
655
657 const auto xmax() const {
658 return m_xmax;
659 }
660
663 bool is_monotonic() const {
664 return m_monotonic_intervals.size() == 1;
665 }
666
671 auto get_index(double x) const {
672
673 // https://proquest.safaribooksonline.com/9780321637413
674 // https://web.stanford.edu/class/archive/cs/cs107/cs107.1202/lab1/
675 auto midpoint_Knuth = [](int x, int y) { return (x & y) + ((x ^ y) >> 1); };
676
677 int iL = 0U, iR = static_cast<int>(m_expansions.size()) - 1, iM;
678 while (iR - iL > 1) {
679 iM = midpoint_Knuth(iL, iR);
680 if (x >= m_expansions[iM].xmin()) {
681 iL = iM;
682 } else {
683 iR = iM;
684 }
685 }
686 return (x < m_expansions[iL].xmax()) ? iL : iR;
687 };
688
693 double eval(double x) const {
694 return m_expansions[get_index(x)].eval(x);
695 }
696
698 template <typename Container>
699 const auto eval_many(const Container& x, Container& y) const {
700 if (x.size() != y.size()) {
701 throw std::invalid_argument("x and y are not the same size");
702 }
703 for (auto i = 0U; i < x.size(); ++i) {
704 y(i) = eval(x(i));
705 }
706 }
707
709 template <typename Container>
710 const auto eval_manyC(const Container x[], Container y[], std::size_t N) const {
711 for (auto i = 0U; i < N; ++i) {
712 y[i] = eval(x[i]);
713 }
714 }
715
717 const std::vector<IntervalMatch> get_intervals_containing_y(double y) const {
718 std::vector<IntervalMatch> matches;
719 for (auto& interval : m_monotonic_intervals) {
720 if (y >= interval.ymin && y <= interval.ymax) {
721 matches.push_back(interval);
722 }
723 }
724 return matches;
725 }
726
729 const auto get_x_for_y(double y, unsigned int bits, std::size_t max_iter, double boundsftol) const {
730 std::vector<std::pair<double, int>> solns;
731 for (const auto& interval : m_monotonic_intervals) {
732 // Each interval is required to be monotonic internally, so if the value of
733 // y is within the y values at the endpoints it is a candidate
734 // for rootfinding, otherwise continue
735 if (interval.contains_y(y)) {
736 // Now look at the expansions that intersect the interval
737 for (const auto& ei : interval.expansioninfo) {
738 // Again, since the portions of the expansions are required
739 // to be monotonic, if it is contained then a solution must exist
740 if (ei.contains_y(y)) {
741 const ChebyshevExpansion<ArrayType>& e = m_expansions[ei.idx];
742 auto [xvalue, num_steps] = e.solve_for_x_count(y, ei.xmin, ei.xmax, bits, max_iter, boundsftol);
743 solns.emplace_back(xvalue, static_cast<int>(num_steps));
744 }
745 }
746 }
747 }
748 return solns;
749 }
750
752 template <typename YContainer, typename CountContainer>
753 const auto count_x_for_y_many(const YContainer& y, unsigned int bits, std::size_t max_iter, double boundsftol, CountContainer& x) const {
754 if (x.size() != y.size()) {
755 throw std::invalid_argument("x and y are not the same size");
756 }
757 for (auto i = 0U; i < x.size(); ++i) {
758 x(i) = get_x_for_y(y(i), bits, max_iter, boundsftol).size();
759 }
760 }
761
763 template <typename YContainer, typename CountContainer>
764 const auto count_x_for_y_manyC(const YContainer y[], size_t N, unsigned int bits, std::size_t max_iter, double boundsftol,
765 CountContainer x[]) const {
766 for (auto i = 0U; i < N; ++i) {
767 x[i] = get_x_for_y(y[i], bits, max_iter, boundsftol).size();
768 }
769 }
770};
771
772static_assert(std::is_copy_constructible_v<ChebyshevApproximation1D<std::vector<double>>>);
773static_assert(std::is_copy_assignable_v<ChebyshevApproximation1D<std::vector<double>>>);
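/* Usage sketch (illustrative; the two pieces below are hand-built and their coefficients are
   chosen only for this example). A ChebyshevApproximation1D takes ownership of a vector of
   contiguous ChebyshevExpansion pieces, locates the extrema of the approximation, and caches the
   monotonic intervals used by get_x_for_y. The two degree-3 pieces together represent
   f(x) = x^3 + 3x on [0, 2], which is strictly increasing:

       using CE = ChebyshevExpansion<std::vector<double>>;
       std::vector<CE> pieces;
       pieces.emplace_back(0.0, 1.0, std::vector<double>{1.8125, 1.96875, 0.1875, 0.03125});
       pieces.emplace_back(1.0, 2.0, std::vector<double>{8.4375, 4.96875, 0.5625, 0.03125});
       ChebyshevApproximation1D<std::vector<double>> approx(std::move(pieces));
       double y = approx.eval(1.5);                           // 7.875
       bool mono = approx.is_monotonic();                     // true: f'(x) = 3x^2 + 3 has no real roots
       auto solns = approx.get_x_for_y(2.0, 64, 100, 1e-14);  // one (x, iteration count) pair, x ~ 0.596
*/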
774
775struct SuperAncillaryTwoPhaseSolution
776{
777 double T,
778 q;
779 std::size_t counter;
780};
781
790template <typename ArrayType = Eigen::ArrayXd>
791class SuperAncillary
792{
793 private:
794 // These must always be present
795 ChebyshevApproximation1D<ArrayType> m_rhoL,
796 m_rhoV,
797 m_p;
798
799 // These ones *may* be present
800 std::optional<ChebyshevApproximation1D<ArrayType>> m_hL,
801 m_hV,
802 m_sL,
803 m_sV,
804 m_uL,
805 m_uV,
806 m_invlnp;
807
808 double m_Tmin;
809 double m_Tcrit_num;
810 double m_rhocrit_num;
811 double m_pmin;
812 double m_pmax;
813
818 auto loader(const nlohmann::json& j, const std::string& key) {
819 std::vector<ChebyshevExpansion<ArrayType>> buf;
820 // If you want to use Eigen...
821 // auto toeig = [](const nlohmann::json &j) -> Eigen::ArrayXd{
822 // auto vec = j.get<std::vector<double>>();
823 // return Eigen::Map<const Eigen::ArrayXd>(&vec[0], vec.size());
824 // };
825 // for (auto& block : j[key]){
826 // buf.emplace_back(block.at("xmin"), block.at("xmax"), toeig(block.at("coef")));
827 // }
828 for (auto& block : j[key]) {
829 buf.emplace_back(block.at("xmin"), block.at("xmax"), block.at("coef"));
830 }
831 return buf;
832 }
833
837 auto make_invlnp(Eigen::Index Ndegree) {
838
839 auto pmin = m_p.eval(m_p.xmin());
840 auto pmax = m_p.eval(m_p.xmax());
841 // auto N = m_p.get_expansions().front().coeff().size()-1;
842 const double EPSILON = std::numeric_limits<double>::epsilon();
843
844 auto func = [&](long i, long Npts, double lnp) -> double {
845 double p = exp(lnp);
846 auto solns = m_p.get_x_for_y(p, 64, 100U, 1e-8);
847
848 if (solns.size() != 1) {
849 if ((i == 0 || i == Npts - 1) && (p > pmin * (1 - EPSILON * 1000) && p < pmin * (1 + EPSILON * 1000))) {
850 return m_p.get_monotonic_intervals()[0].xmin;
851 }
852 if ((i == 0 || i == Npts - 1) && (p > pmax * (1 - EPSILON * 1000) && p < pmax * (1 + EPSILON * 1000))) {
853 return m_p.get_monotonic_intervals()[0].xmax;
854 }
855 std::stringstream ss;
856 ss << std::setprecision(20) << "Must have one solution for T(p), got " << solns.size() << " for " << p << " Pa; limits are [" << pmin
857 << " Pa, " << pmax << " Pa]; i is " << i;
858 throw std::invalid_argument(ss.str());
859 }
860 auto [T, iters] = solns[0];
861 return T;
862 };
863
864 using CE_t = std::vector<ChebyshevExpansion<ArrayType>>;
865 return detail::dyadic_splitting<decltype(func), CE_t>(Ndegree, func, log(pmin), log(pmax), 3, 1e-12, 26);
866 }
867
868 // using PropertyPairs = properties::PropertyPairs; ///< A convenience alias to save some typing
869
870 public:
873 SuperAncillary(const nlohmann::json& j)
874 : m_rhoL(std::move(loader(j, "jexpansions_rhoL"))),
875 m_rhoV(std::move(loader(j, "jexpansions_rhoV"))),
876 m_p(std::move(loader(j, "jexpansions_p"))),
877 m_Tmin(m_p.xmin()),
878 m_Tcrit_num(j.at("meta").at("Tcrittrue / K")),
879 m_rhocrit_num(j.at("meta").at("rhocrittrue / mol/m^3")),
880 m_pmin(m_p.eval(m_p.xmin())),
881 m_pmax(m_p.eval(m_p.xmax())) {};
882
886 SuperAncillary(const std::string& s) : SuperAncillary(nlohmann::json::parse(s)) {};
887
893 const auto& get_approx1d(char k, short Q) const {
894 auto get_or_throw = [&](const auto& v) -> const auto& {
895 if (v) {
896 return v.value();
897 } else {
898 throw std::invalid_argument("unable to get the variable " + std::string(1, k) + ", make sure it has been added to superancillary");
899 }
900 };
901 switch (k) {
902 case 'P':
903 return m_p;
904 case 'D':
905 return (Q == 0) ? m_rhoL : m_rhoV;
906 case 'H':
907 return (Q == 0) ? get_or_throw(m_hL) : get_or_throw(m_hV);
908 case 'S':
909 return (Q == 0) ? get_or_throw(m_sL) : get_or_throw(m_sV);
910 case 'U':
911 return (Q == 0) ? get_or_throw(m_uL) : get_or_throw(m_uV);
912 default:
913 throw std::invalid_argument("Bad key of '" + std::string(1, k) + "'");
914 }
915 }
917 const auto& get_invlnp() {
918 // Lazily construct on the first access
919 if (!m_invlnp) {
920 // Degree of the inverse expansion is the same as that of the pressure expansions
921 auto Ndegree = m_p.get_expansions()[0].coeff().size() - 1;
922 m_invlnp = make_invlnp(Ndegree);
923 }
924 return m_invlnp;
925 }
927 const double get_pmin() const {
928 return m_pmin;
929 }
931 const double get_pmax() const {
932 return m_pmax;
933 }
935 const double get_Tmin() const {
936 return m_Tmin;
937 }
939 const double get_Tcrit_num() const {
940 return m_Tcrit_num;
941 }
943 const double get_rhocrit_num() const {
944 return m_rhocrit_num;
945 }
946
952 void add_variable(char var, const std::function<double(double, double)>& caller) {
953 Eigen::MatrixXd Lmat, Umat;
954 std::tie(Lmat, Umat) = detail::get_LU_matrices(12);
955
956 auto builder = [&](char var, auto& variantL, auto& variantV) {
957 std::vector<ChebyshevExpansion<ArrayType>> newexpL, newexpV;
958 const auto& expsL = get_approx1d('D', 0);
959 const auto& expsV = get_approx1d('D', 1);
960 if (expsL.get_expansions().size() != expsV.get_expansions().size()) {
961 throw std::invalid_argument("L&V are not the same size");
962 }
963 for (auto i = 0U; i < expsL.get_expansions().size(); ++i) {
964 const auto& expL = expsL.get_expansions()[i];
965 const auto& expV = expsV.get_expansions()[i];
966 const auto& T = expL.get_nodes_realworld();
967 // Get the functional values at the Chebyshev nodes
968 Eigen::ArrayXd funcL = Umat * expL.coeff().matrix();
969 Eigen::ArrayXd funcV = Umat * expV.coeff().matrix();
970 // Apply the function inplace to do the transformation
971 // of the functional values at the nodes
972 for (auto j = 0; j < funcL.size(); ++j) {
973 funcL(j) = caller(T(j), funcL(j));
974 funcV(j) = caller(T(j), funcV(j));
975 }
976 // And now rebuild the expansions by left-multiplying by the L matrix
977 newexpL.emplace_back(expL.xmin(), expL.xmax(), (Lmat * funcL.matrix()).eval());
978 newexpV.emplace_back(expV.xmin(), expV.xmax(), (Lmat * funcV.matrix()).eval());
979 }
980
981 variantL.emplace(std::move(newexpL));
982 variantV.emplace(std::move(newexpV));
983 };
984
985 switch (var) {
986 case 'H':
987 builder(var, m_hL, m_hV);
988 break;
989 case 'S':
990 builder(var, m_sL, m_sV);
991 break;
992 case 'U':
993 builder(var, m_uL, m_uV);
994 break;
995 default:
996 throw std::invalid_argument("nope");
997 }
998 }
999
1005 double eval_sat(double T, char k, short Q) const {
1006 if (Q == 1 || Q == 0) {
1007 return get_approx1d(k, Q).eval(T);
1008 } else {
1009 throw std::invalid_argument("invalid value for Q");
1010 }
1011 }
1012
1016 template <typename Container>
1017 void eval_sat_many(const Container& T, char k, short Q, Container& y) const {
1018 if (T.size() != y.size()) {
1019 throw std::invalid_argument("T and y are not the same size");
1020 }
1021 const auto& approx = get_approx1d(k, Q);
1022 for (auto i = 0; i < T.size(); ++i) {
1023 y(i) = approx.eval(T(i));
1024 }
1025 }
1026
1030 template <typename Container>
1031 void eval_sat_manyC(const Container T[], std::size_t N, char k, short Q, Container y[]) const {
1032 // if (T.size() != y.size()){ throw std::invalid_argument("T and y are not the same size"); }
1033 const auto& approx = get_approx1d(k, Q);
1034 for (std::size_t i = 0; i < N; ++i) {
1035 y[i] = approx.eval(T[i]);
1036 }
1037 }
1038
1048 auto solve_for_T(double propval, char k, bool Q, unsigned int bits = 64, unsigned int max_iter = 100, double boundsftol = 1e-13) const {
1049 return get_approx1d(k, Q).get_x_for_y(propval, bits, max_iter, boundsftol);
1050 }
1051
1058 auto get_vaporquality(double T, double propval, char k) const {
1059 if (k == 'D') {
1060 // Need to special-case here because v = q*v_V + (1-q)*v_L, but it is NOT the case that rho = q*rho_V + (1-q)*rho_L
1061 double v_L = 1 / get_approx1d('D', 0).eval(T);
1062 double v_V = 1 / get_approx1d('D', 1).eval(T);
1063 return (1 / propval - v_L) / (v_V - v_L);
1064 } else {
1065 double L = get_approx1d(k, 0).eval(T);
1066 double V = get_approx1d(k, 1).eval(T);
1067 return (propval - L) / (V - L);
1068 }
1069 }
1070
1076 auto get_T_from_p(double p) {
1077 return get_invlnp().value().eval(log(p));
1078 }
1079
1087 auto get_yval(double T, double q, char k) const {
1088
1089 if (k == 'D') {
1090 // Need to special-case here because v = q*v_V + (1-q)*v_L, but it is NOT the case that rho = q*rho_V + (1-q)*rho_L
1091 double v_L = 1 / get_approx1d('D', 0).eval(T);
1092 double v_V = 1 / get_approx1d('D', 1).eval(T);
1093 double v = q * v_V + (1 - q) * v_L;
1094 return 1 / v; // rho = 1/v
1095 } else {
1096 double L = get_approx1d(k, 0).eval(T);
1097 double V = get_approx1d(k, 1).eval(T);
1098 return q * V + (1 - q) * L;
1099 }
1100 }
1101
1103 template <typename Container>
1104 void get_yval_many(const Container& T, char k, const Container& q, Container& y) const {
1105 if (T.size() != y.size() || T.size() != q.size()) {
1106 throw std::invalid_argument("T, q, and y are not all the same size");
1107 }
1108
1109 const auto& L = get_approx1d(k, 0);
1110 const auto& V = get_approx1d(k, 1);
1111
1112 if (k == 'D') {
1113 for (auto i = 0; i < T.size(); ++i) {
1114 // Need to special-case here because v = q*v_V + (1-q)*v_L, but it is NOT the case that rho = q*rho_V + (1-q)*rho_L
1115 double v_L = 1.0 / L.eval(T(i));
1116 double v_V = 1.0 / V.eval(T(i));
1117 double v = q(i) * v_V + (1 - q(i)) * v_L;
1118 y(i) = 1 / v;
1119 }
1120 } else {
1121 for (auto i = 0; i < T.size(); ++i) {
1122 y(i) = q(i) * V.eval(T(i)) + (1 - q(i)) * L.eval(T(i));
1123 }
1124 }
1125 }
1133 auto get_all_intersections(const char k, const double val, unsigned int bits, std::size_t max_iter, double boundsftol) const {
1134 const auto& L = get_approx1d(k, 0);
1135 const auto& V = get_approx1d(k, 1);
1136 auto TsatL = L.get_x_for_y(val, bits, max_iter, boundsftol);
1137 const auto TsatV = V.get_x_for_y(val, bits, max_iter, boundsftol);
1138 for (auto&& el : TsatV) {
1139 TsatL.push_back(el);
1140 }
1141 // TsatL.insert(TsatL.end(),
1142 // std::make_move_iterator(TsatV.begin() + TsatV.size()),
1143 // std::make_move_iterator(TsatV.end()));
1144 return TsatL;
1145 }
1146
1161 std::optional<SuperAncillaryTwoPhaseSolution> iterate_for_Tq_XY(double Tmin, double Tmax, char ch1, double val1, char ch2, double val2,
1162 unsigned int bits, std::size_t max_iter, double boundsftol) const {
1163
1164 std::size_t counter = 0;
1165 auto f = [&](double T_) {
1166 counter++;
1167 double q_fromv1 = get_vaporquality(T_, val1, ch1);
1168 double resid = get_yval(T_, q_fromv1, ch2) - val2;
1169 return resid;
1170 };
1171 using namespace boost::math::tools;
1172 double fTmin = f(Tmin), fTmax = f(Tmax);
1173
1174 // First we check if we are converged enough (TOMS748 does not stop based on function value)
1175 double T;
1176
1177 // A little lambda to make it easier to return
1178 // in different logical branches
1179 auto returner = [&]() {
1180 SuperAncillaryTwoPhaseSolution soln;
1181 soln.T = T;
1182 soln.q = get_vaporquality(T, val1, ch1);
1183 soln.counter = counter;
1184 return soln;
1185 };
1186
1187 if (std::abs(fTmin) < boundsftol) {
1188 T = Tmin;
1189 return returner();
1190 }
1191 if (std::abs(fTmax) < boundsftol) {
1192 T = Tmax;
1193 return returner();
1194 }
1195 if (fTmin * fTmax > 0) {
1196 // No sign change, this means that the inputs are not within the two-phase region
1197 // and thus no iteration is needed
1198 return std::nullopt;
1199 }
1200 // Neither bound meets the convergence criterion, we need to iterate on temperature
1201 try {
1202 boost::math::uintmax_t max_iter_ = static_cast<boost::math::uintmax_t>(max_iter);
1203 auto [l, r] = toms748_solve(f, Tmin, Tmax, fTmin, fTmax, eps_tolerance<double>(bits), max_iter_);
1204 T = (r + l) / 2.0;
1205 return returner();
1206 } catch (...) {
1207 std::cout << "fTmin,fTmax: " << fTmin << "," << fTmax << std::endl;
1208 throw;
1209 }
1210 }
1211
1221 std::optional<SuperAncillaryTwoPhaseSolution> solve_for_Tq_DX(const double rho, const double propval, const char k, unsigned int bits,
1222 std::size_t max_iter, double boundsftol) const {
1223
1224 const auto& Lrho = get_approx1d('D', 0);
1225 const auto& Vrho = get_approx1d('D', 1);
1226 auto Tsat = get_all_intersections(k, propval, bits, max_iter, boundsftol);
1227 std::size_t rhosat_soln_count = Tsat.size();
1228
1229 std::tuple<double, double> Tsearchrange;
1230 if (rhosat_soln_count == 1) {
1231 // Search the complete range from (just below) Tmin up to the single intersection
1232 // temperature obtained just above, where the saturation value of the property k equals propval
1233 Tsearchrange = std::make_tuple(Lrho.xmin() * 0.999, std::get<0>(Tsat[0]));
1234 } else if (rhosat_soln_count == 2) {
1235 double y1 = std::get<0>(Tsat[0]), y2 = std::get<0>(Tsat[1]);
1236 if (y2 < y1) {
1237 std::swap(y1, y2);
1238 } // Required to be sorted in increasing value
1239 Tsearchrange = std::make_tuple(y1, y2);
1240 } else {
1241 throw std::invalid_argument("cannot have number of solutions other than 1 or 2; got " + std::to_string(rhosat_soln_count) + " solutions");
1242 }
1243 auto [a, b] = Tsearchrange;
1244 return iterate_for_Tq_XY(a, b, 'D', rho, k, propval, bits, max_iter, boundsftol);
1245 }
1246
1248 template <typename Container>
1249 void solve_for_Tq_DX_many(const Container& rho, const Container& propval, const char k, unsigned int bits, std::size_t max_iter,
1250 double boundsftol, Container& T, Container& q, Container& count) {
1251 if (std::set<std::size_t>({rho.size(), propval.size(), T.size(), q.size(), count.size()}).size() != 1) {
1252 throw std::invalid_argument("rho, propval, T, q, count are not all the same size");
1253 }
1254 for (auto i = 0U; i < T.size(); ++i) {
1255 auto osoln = solve_for_Tq_DX(rho(i), propval(i), k, bits, max_iter, boundsftol);
1256 if (osoln) {
1257 const auto& soln = osoln.value();
1258 T(i) = soln.T;
1259 q(i) = soln.q;
1260 count(i) = soln.counter;
1261 } else {
1262 T(i) = -1;
1263 q(i) = -1;
1264 count(i) = -1;
1265 }
1266 }
1267 }
1268
1269 // /**
1270 // The high-level function used to carry out a solution. It handles all the different permutations of variables and delegates to lower-level functions to actually do the calculations
1271
1272 // \param pair The enumerated pair of thermodynamic variables being provided
1273 // \param val1 The first value
1274 // \param val2 The second value
1275 // */
1276 // auto flash(PropertyPairs pair, double val1, double val2) const -> std::optional<SuperAncillaryTwoPhaseSolution>{
1277 // double T, q;
1278 // std::size_t counter = 0;
1279 // double boundsftol = 1e-12;
1280
1281 // auto returner = [&](){
1282 // SuperAncillaryTwoPhaseSolution soln;
1283 // soln.T = T;
1284 // soln.q = q;
1285 // soln.counter = counter;
1286 // return soln;
1287 // };
1288
1289 // auto get_T_other = [&](){
1290 // switch (pair){
1291 // case PropertyPairs::DT: return std::make_tuple(val2, val1, 'D');
1292 // case PropertyPairs::HT: return std::make_tuple(val2, val1, 'H');
1293 // case PropertyPairs::ST: return std::make_tuple(val2, val1, 'S');
1294 // case PropertyPairs::TU: return std::make_tuple(val1, val2, 'U');
1295 // default:
1296 // throw std::invalid_argument("not valid");
1297 // };
1298 // };
1299 // auto get_p_other = [&](){
1300 // switch (pair){
1301 // case PropertyPairs::DP: return std::make_tuple(val2, val1, 'D');
1302 // case PropertyPairs::HP: return std::make_tuple(val2, val1, 'H');
1303 // case PropertyPairs::PS: return std::make_tuple(val1, val2, 'S');
1304 // case PropertyPairs::PU: return std::make_tuple(val1, val2, 'U');
1305 // default:
1306 // throw std::invalid_argument("not valid");
1307 // };
1308 // };
1309 // auto get_rho_other = [&](){
1310 // switch (pair){
1311 // case PropertyPairs::DH: return std::make_tuple(val1, val2, 'H');
1312 // case PropertyPairs::DS: return std::make_tuple(val1, val2, 'S');
1313 // case PropertyPairs::DU: return std::make_tuple(val1, val2, 'U');
1314 // default:
1315 // throw std::invalid_argument("not valid");
1316 // };
1317 // };
1318
1319 // // Given the arguments, unpack them into (xchar, xval, ychar, yval) where x is the
1320 // // variable of convenience that will be used to do the interval intersection and then
1321 // // the iteration will be in the other variable
1322 // auto parse_HSU = [&](){
1323 // switch (pair){
1324 // case PropertyPairs::HS: return std::make_tuple('S', val2, 'H', val1);
1325 // case PropertyPairs::SU: return std::make_tuple('S', val1, 'U', val2);
1326 // case PropertyPairs::HU: return std::make_tuple('H', val1, 'U', val2);
1327 // default:
1328 // throw std::invalid_argument("not valid");
1329 // };
1330 // };
1331
1332 // switch(pair){
1333 // // Case 0, PT is always single-phase, by definition
1334 // case PropertyPairs::PT: throw std::invalid_argument("PT inputs are trivial");
1335
1336 // // Case 1, T is a given variable
1337 // case PropertyPairs::DT:
1338 // case PropertyPairs::HT:
1339 // case PropertyPairs::ST:
1340 // case PropertyPairs::TU:
1341 // {
1342 // auto [Tval, other, otherchar] = get_T_other();
1343 // q = get_vaporquality(Tval, other, otherchar);
1344 // T = Tval;
1345 // if (0 <= q && q <= 1){
1346 // return returner();
1347 // }
1348 // break;
1349 // }
1350 // // Case 2, p is a given variable, p gets converted to T, and then q calculated
1351 // case PropertyPairs::DP:
1352 // case PropertyPairs::HP:
1353 // case PropertyPairs::PS:
1354 // case PropertyPairs::PU:
1355 // {
1356 // auto [p, other, otherchar] = get_p_other();
1357 // T = get_T_from_p(p);
1358 // q = get_vaporquality(T, other, otherchar); // Based on the T and the other variable
1359 // if (0 <= q && q <= 1){
1360 // return returner();
1361 // }
1362 // break;
1363 // }
1364 // // Case 3, rho is a given variable, special case that because it is relatively simple
1365 // // and only water and heavy water have more than one density solution along the saturation curve
1366 // case PropertyPairs::DH:
1367 // case PropertyPairs::DS:
1368 // case PropertyPairs::DU:
1369 // {
1370 // auto [rho, otherval, otherchar] = get_rho_other();
1371 // auto optsoln = solve_for_Tq_DX(rho, otherval, otherchar, 64, 100, boundsftol);
1372 // if (optsoln){
1373 // const auto& soln = optsoln.value();
1374 // T = soln.T;
1375 // q = soln.q;
1376 // counter = soln.counter;
1377 // return returner();
1378 // }
1379 // break;
1380 // }
1381 // // Case 4, handle all the cases where complicated interval checks are
1382 // // necessary
1383 // case PropertyPairs::HS:
1384 // case PropertyPairs::HU:
1385 // case PropertyPairs::SU:
1386 // {
1387 // auto [xchar, xval, ychar, yval] = parse_HSU();
1388 // auto Tsat = get_all_intersections(xchar, xval, 64, 100, boundsftol);
1389 // auto sort_predicate = [](const auto& x, const auto& y) {
1390 // return std::get<0>(x) < std::get<0>(y);
1391 // };
1392 // std::sort(Tsat.begin(), Tsat.end(), sort_predicate);
1393 // // std::cout << Tsat.size() << " T crossings for " << xval << std::endl;
1394 // // for (auto & el : Tsat){
1395 // // std::cout << std::get<0>(el) << std::endl;
1396 // // }
1397 // if (Tsat.size() == 1){
1398 // // A single crossing, in which the other temperature bound for iteration
1399 // // is assumed to be the minimum temperature of the superancillary
1400 // double Tmin = get_approx1d('D', 0).xmin*0.9999;
1401 // double Tmax = std::get<0>(Tsat[0]);
1402 // auto o = iterate_for_Tq_XY(Tmin, Tmax, xchar, xval, ychar, yval, 64, 100, boundsftol);
1403 // if (o){ return o.value(); }
1404 // }
1405 // else if (Tsat.size() % 2 == 0){ // Even number of crossings
1406 // // neighboring intersections should bound the solution, or if not, the point is single-phase
1407 // for (auto i = 0; i < Tsat.size(); i += 2){
1408 // auto o = iterate_for_Tq_XY(std::get<0>(Tsat[i]), std::get<0>(Tsat[i+1]), xchar, xval, ychar, yval, 64, 100, boundsftol);
1409 // if (o){ return o.value(); }
1410 // }
1411 // }
1412 // else{
1413 // throw std::invalid_argument("Don't know what to do about this number of T crossings ("+std::to_string(Tsat.size())+") yet");
1414 // }
1415 // break;
1416 // }
1417 // default:
1418 // throw std::invalid_argument("These inputs are not yet supported in flash");
1419 // }
1420 // return std::nullopt;
1421 // }
1422
1423 // /// A vectorized version of the flash function used in the Python interface for profiling
1424 // template <typename Container>
1425 // void flash_many(const PropertyPairs ppair, const Container& val1, const Container& val2, Container& T, Container& q, Container& count){
1426 // if (std::set<std::size_t>({val1.size(), val2.size(), T.size(), q.size(), count.size()}).size() != 1){
1427 // throw std::invalid_argument("val1, val2, T, q, count are not all the same size");
1428 // }
1429 // for (auto i = 0U; i < T.size(); ++i){
1430 // try{
1431 // auto osoln = flash(ppair, val1(i), val2(i));
1432 // if (osoln){
1433 // const auto& soln = osoln.value();
1434 // T(i) = soln.T;
1435 // q(i) = soln.q;
1436 // count(i) = soln.counter;
1437 // }
1438 // else{
1439 // T(i) = -1;
1440 // q(i) = -1;
1441 // count(i) = -1;
1442 // }
1443 // }
1444 // catch(std::exception&e){
1445 // // std::cout << e.what() << " for " << val1(i) << "," << val2(i) << std::endl;
1446 // T(i) = -2;
1447 // q(i) = -2;
1448 // count(i) = -2;
1449 // }
1450
1451 // }
1452 // }
1453};
1454
1455static_assert(std::is_copy_constructible_v<SuperAncillary<std::vector<double>>>);
1456static_assert(std::is_copy_assignable_v<SuperAncillary<std::vector<double>>>);
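/* Usage sketch (illustrative). A SuperAncillary is constructed from a JSON document with the
   "jexpansions_rhoL", "jexpansions_rhoV", "jexpansions_p" and "meta" fields read by the
   constructor above; here `j` is assumed to be such an nlohmann::json object, and
   my_eos_enthalpy is a hypothetical h(T, rho) callable supplied by an equation of state:

       SuperAncillary<Eigen::ArrayXd> anc(j);
       double T = 0.5 * (anc.get_Tmin() + anc.get_Tcrit_num());  // some temperature inside the range
       double rhoL = anc.eval_sat(T, 'D', 0);   // saturated liquid density
       double rhoV = anc.eval_sat(T, 'D', 1);   // saturated vapor density
       double p = anc.eval_sat(T, 'P', 0);      // saturation pressure
       double Tback = anc.get_T_from_p(p);      // inverse map; the T(ln p) expansion is built lazily

       anc.add_variable('H', [](double T_, double rho) { return my_eos_enthalpy(T_, rho); });
       double hL = anc.eval_sat(T, 'H', 0);     // saturated liquid enthalpy
       auto Tsolns = anc.solve_for_T(hL, 'H', 0);  // temperatures where hL(T) equals that value
*/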
1457
1458} // namespace superancillary
1459} // namespace CoolProp