@@ -910,3 +910,87 @@ TEST_F(AgradRev, matrix_compile_time_conversions) {
910910 EXPECT_MATRIX_FLOAT_EQ (colvec.val (), rowvec.val ());
911911 EXPECT_MATRIX_FLOAT_EQ (x11.val (), rowvec.val ());
912912}
913+
914+ TEST_F (AgradRev, assign_nan_varmat) {
915+ using stan::math::var_value;
916+ using var_vector = var_value<Eigen::Matrix<double , -1 , 1 >>;
917+ using stan::math::var;
918+ Eigen::VectorXd x_val (10 );
919+ for (int i = 0 ; i < 10 ; ++i) {
920+ x_val (i) = i + 0.1 ;
921+ }
922+ var_vector x (x_val);
923+ var_vector y = var_vector (Eigen::Matrix<double , -1 , 1 >::Constant (
924+ 10 , std::numeric_limits<double >::quiet_NaN ()));
925+ y = stan::math::head (x, 10 );
926+ var sigma = 1.0 ;
927+ var lp = stan::math::normal_lpdf<false >(y, 0 , sigma);
928+ lp.grad ();
929+ Eigen::VectorXd x_ans_adj (10 );
930+ for (int i = 0 ; i < 10 ; ++i) {
931+ x_ans_adj (i) = -(i + 0.1 );
932+ }
933+ EXPECT_MATRIX_EQ (x.adj (), x_ans_adj);
934+ Eigen::VectorXd y_ans_adj = Eigen::VectorXd::Zero (10 );
935+ EXPECT_MATRIX_EQ (y_ans_adj, y.adj ());
936+ }
937+
938+ TEST_F (AgradRev, assign_nan_matvar) {
939+ using stan::math::var;
940+ using var_vector = Eigen::Matrix<var, -1 , 1 >;
941+ Eigen::VectorXd x_val (10 );
942+ for (int i = 0 ; i < 10 ; ++i) {
943+ x_val (i) = i + 0.1 ;
944+ }
945+ var_vector x (x_val);
946+ var_vector y = var_vector (Eigen::Matrix<double , -1 , 1 >::Constant (
947+ 10 , std::numeric_limits<double >::quiet_NaN ()));
948+ // need to store y's previous vari pointers
949+ var_vector z = y;
950+ y = stan::math::head (x, 10 );
951+ var sigma = 1.0 ;
952+ var lp = stan::math::normal_lpdf<false >(y, 0 , sigma);
953+ lp.grad ();
954+ Eigen::VectorXd x_ans_adj (10 );
955+ for (int i = 0 ; i < 10 ; ++i) {
956+ x_ans_adj (i) = -(i + 0.1 );
957+ }
958+ EXPECT_MATRIX_EQ (x.adj (), x_ans_adj);
959+ Eigen::VectorXd z_ans_adj = Eigen::VectorXd::Zero (10 );
960+ EXPECT_MATRIX_EQ (z_ans_adj, z.adj ());
961+ }
962+
/**
 * For var<Matrix> and Matrix<var>, we need to make sure the tape, when going
 * through reverse mode, leads to the same outcomes. In the case where we
 * declare a var<Matrix> without initializing it, i.e.
 * `var_value<Eigen::MatrixXd>`, we need to think about what the equivalent
 * behavior is for `Eigen::Matrix<var, -1, -1>`.
 * When default constructing `Eigen::Matrix<var, -1, -1>` we would have an
 * array of `var` types with `nullptr` as the vari. The first assignment to
 * that array would then just copy the vari pointer from the other array. This
 * is the behavior we want to mimic for `var_value<Eigen::MatrixXd>`. So this
 * test shows that for an uninitialized `var_value<Eigen::MatrixXd>`, we can
 * assign to it and its adjoints come out the same as x's.
 */
976+ TEST_F (AgradRev, assign_nullptr_var) {
977+ using stan::math::var_value;
978+ using var_vector = var_value<Eigen::Matrix<double , -1 , 1 >>;
979+ using stan::math::var;
980+ Eigen::VectorXd x_val (10 );
981+ for (int i = 0 ; i < 10 ; ++i) {
982+ x_val (i) = i + 0.1 ;
983+ }
984+ var_vector x (x_val);
985+ var_vector y;
986+ y = stan::math::head (x, 10 );
987+ var sigma = 1.0 ;
988+ var lp = stan::math::normal_lpdf<false >(y, 0 , sigma);
989+ lp.grad ();
990+ Eigen::VectorXd x_ans_adj (10 );
991+ for (int i = 0 ; i < 10 ; ++i) {
992+ x_ans_adj (i) = -(i + 0.1 );
993+ }
994+ EXPECT_MATRIX_EQ (x.adj (), x_ans_adj);
995+ EXPECT_MATRIX_EQ (x_ans_adj, y.adj ());
996+ }
0 commit comments