diff --git a/photonlib-cpp-examples/poseest/src/main/include/Constants.h b/photonlib-cpp-examples/poseest/src/main/include/Constants.h index d55d4fa7ea..85aa8d1d8c 100644 --- a/photonlib-cpp-examples/poseest/src/main/include/Constants.h +++ b/photonlib-cpp-examples/poseest/src/main/include/Constants.h @@ -49,6 +49,12 @@ inline const wpi::apriltag::AprilTagFieldLayout kTagLayout{ inline const Eigen::Matrix kSingleTagStdDevs{4, 4, 8}; inline const Eigen::Matrix kMultiTagStdDevs{0.5, 0.5, 1}; + +// Constants for filtering vision estimates. +inline constexpr double kMaxAcceptedRobotZ = 0.2; +inline constexpr double kMaxAcceptedRobotPitch = 0.2; +inline constexpr double kMaxAcceptedRobotRoll = 0.2; +inline constexpr double kMaxAcceptedPoseAmbiguity = 0.2; } // namespace Vision namespace Swerve { using namespace units; diff --git a/photonlib-cpp-examples/poseest/src/main/include/Vision.h b/photonlib-cpp-examples/poseest/src/main/include/Vision.h index ba2fa80803..38ca0f6a3d 100644 --- a/photonlib-cpp-examples/poseest/src/main/include/Vision.h +++ b/photonlib-cpp-examples/poseest/src/main/include/Vision.h @@ -93,9 +93,30 @@ class Vision { } } + // Filter out poses that are likely to be incorrect (off the field, not + // flat on the ground, etc). This is optional, but can help prevent bad + // vision estimates from hurting your pose estimator. 
if (visionEst) { - estConsumer(visionEst->estimatedPose.ToPose2d(), visionEst->timestamp, - GetEstimationStdDevs(visionEst->estimatedPose.ToPose2d())); + auto estPose = visionEst->estimatedPose; + if (std::abs(estPose.Z().value()) < + constants::Vision::kMaxAcceptedRobotZ && + estPose.X().value() > + 0 && + estPose.X().value() < + constants::Vision::kTagLayout.GetFieldLength().value() && + estPose.Y().value() > + 0 && + estPose.Y().value() < + constants::Vision::kTagLayout.GetFieldWidth().value() && + std::abs(estPose.Rotation().X().value()) < + constants::Vision::kMaxAcceptedRobotPitch && + std::abs(estPose.Rotation().Y().value()) < + constants::Vision::kMaxAcceptedRobotRoll && + result.GetBestTarget().GetPoseAmbiguity() < + constants::Vision::kMaxAcceptedPoseAmbiguity) { + estConsumer(estPose.ToPose2d(), visionEst->timestamp, + GetEstimationStdDevs(estPose.ToPose2d())); + } } } } diff --git a/photonlib-java-examples/poseest/src/main/java/frc/robot/Constants.java b/photonlib-java-examples/poseest/src/main/java/frc/robot/Constants.java index 71a0ca002f..243e9b30af 100644 --- a/photonlib-java-examples/poseest/src/main/java/frc/robot/Constants.java +++ b/photonlib-java-examples/poseest/src/main/java/frc/robot/Constants.java @@ -52,6 +52,12 @@ public static class Vision { // (Fake values. Experiment and determine estimation noise on an actual robot.) public static final Matrix kSingleTagStdDevs = VecBuilder.fill(4, 4, 8); public static final Matrix kMultiTagStdDevs = VecBuilder.fill(0.5, 0.5, 1); + + // Constants for filtering vision estimates. 
+ public static final double kMaxAcceptedRobotZ = 0.2; + public static final double kMaxAcceptedRobotPitch = 0.2; + public static final double kMaxAcceptedRobotRoll = 0.2; + public static final double kMaxAcceptedPoseAmbiguity = 0.2; } public static class Swerve { diff --git a/photonlib-java-examples/poseest/src/main/java/frc/robot/Vision.java b/photonlib-java-examples/poseest/src/main/java/frc/robot/Vision.java index ef7118f54a..a3b47ccdca 100644 --- a/photonlib-java-examples/poseest/src/main/java/frc/robot/Vision.java +++ b/photonlib-java-examples/poseest/src/main/java/frc/robot/Vision.java @@ -110,7 +110,19 @@ public void periodic() { // Change our trust in the measurement based on the tags we can see var estStdDevs = getEstimationStdDevs(); - estConsumer.accept(est.estimatedPose.toPose2d(), est.timestampSeconds, estStdDevs); + // Filter out poses that are likely to be incorrect (off the field, not flat on the + // ground, etc). This is optional, but can help prevent bad vision estimates from + // hurting your pose estimator. + if (Math.abs(estPose.getZ()) < kMaxAcceptedRobotZ + && estPose.getX() > 0 + && estPose.getX() < kTagLayout.getFieldLength() + && estPose.getY() > 0 + && estPose.getY() < kTagLayout.getFieldWidth() + && Math.abs(estPose.getRotation().getX()) < kMaxAcceptedRobotPitch + && Math.abs(estPose.getRotation().getY()) < kMaxAcceptedRobotRoll + && result.getBestTarget().poseAmbiguity < kMaxAcceptedPoseAmbiguity) { + estConsumer.accept(est.estimatedPose.toPose2d(), est.timestampSeconds, estStdDevs); + } }); } } diff --git a/photonlib-python-examples/poseest/robot.py b/photonlib-python-examples/poseest/robot.py index f06e982869..1d0e982d09 100644 --- a/photonlib-python-examples/poseest/robot.py +++ b/photonlib-python-examples/poseest/robot.py @@ -35,6 +35,12 @@ wpimath.geometry.Rotation3d.fromDegrees(0.0, -30.0, 0.0), ) +# Constants for filtering vision estimates. 
+kMaxAcceptedRobotZ = 0.2 +kMaxAcceptedRobotPitch = 0.2 +kMaxAcceptedRobotRoll = 0.2 +kMaxAcceptedPoseAmbiguity = 0.2 + class MyRobot(wpilib.TimedRobot): def robotInit(self) -> None: @@ -42,8 +48,9 @@ def robotInit(self) -> None: self.controller = wpilib.XboxController(0) self.swerve = drivetrain.Drivetrain() self.cam = PhotonCamera("YOUR CAMERA NAME") + self.aprilTagField = AprilTagFieldLayout.loadField(AprilTagField.kDefaultField) self.camPoseEst = PhotonPoseEstimator( - AprilTagFieldLayout.loadField(AprilTagField.kDefaultField), + self.aprilTagField, kRobotToCam, ) @@ -53,9 +60,24 @@ def robotPeriodic(self) -> None: if camEstPose is None: camEstPose = self.camPoseEst.estimateLowestAmbiguityPose(result) - self.swerve.addVisionPoseEstimate( - camEstPose.estimatedPose, camEstPose.timestampSeconds - ) + # Filter out poses that are likely to be incorrect (off the field, not flat on the ground, etc). + # This is optional, but can help prevent bad vision estimates from hurting your pose estimator. + estPose = camEstPose.estimatedPose + if ( + abs(estPose.Z()) < kMaxAcceptedRobotZ + and 0 + < estPose.X() + < self.aprilTagField.getFieldLength() + and 0 + < estPose.Y() + < self.aprilTagField.getFieldWidth() + and abs(estPose.rotation().X()) < kMaxAcceptedRobotPitch + and abs(estPose.rotation().Y()) < kMaxAcceptedRobotRoll + and result.getBestTarget().poseAmbiguity < kMaxAcceptedPoseAmbiguity + ): + self.swerve.addVisionPoseEstimate( + camEstPose.estimatedPose, camEstPose.timestampSeconds + ) self.swerve.updateOdometry() self.swerve.log()