RRTXstatic.cpp
/*********************************************************************
* Software License Agreement (BSD License)
*
* Copyright (c) 2016, Georgia Institute of Technology
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
*   * Redistributions of source code must retain the above copyright
*     notice, this list of conditions and the following disclaimer.
*   * Redistributions in binary form must reproduce the above
*     copyright notice, this list of conditions and the following
*     disclaimer in the documentation and/or other materials provided
*     with the distribution.
*   * Neither the name of the Rice University nor the names of its
*     contributors may be used to endorse or promote products derived
*     from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*********************************************************************/

/* Author: Florian Hauer */

#include "ompl/geometric/planners/rrt/RRTXstatic.h"
#include <algorithm>
#include <boost/math/constants/constants.hpp>
#include <limits>
#include "ompl/base/Goal.h"
#include "ompl/base/goals/GoalSampleableRegion.h"
#include "ompl/base/goals/GoalState.h"
#include "ompl/base/objectives/PathLengthOptimizationObjective.h"
#include "ompl/base/samplers/InformedStateSampler.h"
#include "ompl/base/samplers/informed/RejectionInfSampler.h"
#include "ompl/tools/config/SelfConfig.h"
#include "ompl/util/GeometricEquations.h"

ompl::geometric::RRTXstatic::RRTXstatic(const base::SpaceInformationPtr &si)
  : base::Planner(si, "RRTXstatic")
  , mc_(opt_, pdef_)
  , q_(mc_)
{
    specs_.approximateSolutions = true;
    specs_.optimizingPaths = true;
    specs_.canReportIntermediateSolutions = true;

    Planner::declareParam<double>("range", this, &RRTXstatic::setRange, &RRTXstatic::getRange, "0.:1.:10000.");
    Planner::declareParam<double>("goal_bias", this, &RRTXstatic::setGoalBias, &RRTXstatic::getGoalBias, "0.:.05:1.");
    Planner::declareParam<double>("epsilon", this, &RRTXstatic::setEpsilon, &RRTXstatic::getEpsilon, "0.:.01:10.");
    Planner::declareParam<double>("rewire_factor", this, &RRTXstatic::setRewireFactor, &RRTXstatic::getRewireFactor,
                                  "1.0:0.01:2.0");
    Planner::declareParam<bool>("use_k_nearest", this, &RRTXstatic::setKNearest, &RRTXstatic::getKNearest, "0,1");
    Planner::declareParam<bool>("update_children", this, &RRTXstatic::setUpdateChildren, &RRTXstatic::getUpdateChildren,
                                "0,1");
    Planner::declareParam<int>("rejection_variant", this, &RRTXstatic::setVariant, &RRTXstatic::getVariant, "0:3");
    Planner::declareParam<double>("rejection_variant_alpha", this, &RRTXstatic::setAlpha, &RRTXstatic::getAlpha,
                                  "0.:1.");
    Planner::declareParam<bool>("informed_sampling", this, &RRTXstatic::setInformedSampling,
                                &RRTXstatic::getInformedSampling, "0,1");
    Planner::declareParam<bool>("sample_rejection", this, &RRTXstatic::setSampleRejection,
                                &RRTXstatic::getSampleRejection, "0,1");
    Planner::declareParam<unsigned int>("number_sampling_attempts", this, &RRTXstatic::setNumSamplingAttempts,
                                        &RRTXstatic::getNumSamplingAttempts, "10:10:100000");

    addPlannerProgressProperty("iterations INTEGER", [this] { return numIterationsProperty(); });
    addPlannerProgressProperty("motions INTEGER", [this] { return numMotionsProperty(); });
    addPlannerProgressProperty("best cost REAL", [this] { return bestCostProperty(); });
}
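
// Usage note (illustrative sketch, not part of the original source): the parameters
// declared above can also be set at runtime through OMPL's generic parameter
// interface instead of the individual setters. The "planner" pointer below is a
// hypothetical ompl::base::PlannerPtr holding an RRTXstatic instance:
//
//     planner->params().setParam("range", "0.5");
//     planner->params().setParam("rewire_factor", "1.1");
//     planner->params().setParam("informed_sampling", "1");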

ompl::geometric::RRTXstatic::~RRTXstatic()
{
    freeMemory();
}

void ompl::geometric::RRTXstatic::setup()
{
    Planner::setup();
    tools::SelfConfig sc(si_, getName());
    sc.configurePlannerRange(maxDistance_);
    if (!si_->getStateSpace()->hasSymmetricDistance() || !si_->getStateSpace()->hasSymmetricInterpolate())
    {
        OMPL_WARN("%s requires a state space with symmetric distance and symmetric interpolation.", getName().c_str());
    }

    if (!nn_)
        nn_.reset(tools::SelfConfig::getDefaultNearestNeighbors<Motion *>(this));
    nn_->setDistanceFunction([this](const Motion *a, const Motion *b) { return distanceFunction(a, b); });

    // Setup optimization objective
    //
    // If no optimization objective was specified, then default to
    // optimizing path length as computed by the distance() function
    // in the state space.
    if (pdef_)
    {
        if (pdef_->hasOptimizationObjective())
            opt_ = pdef_->getOptimizationObjective();
        else
        {
            OMPL_INFORM("%s: No optimization objective specified. Defaulting to optimizing path length for the allowed "
                        "planning time.",
                        getName().c_str());
            opt_ = std::make_shared<base::PathLengthOptimizationObjective>(si_);

            // Store the new objective in the problem def'n
            pdef_->setOptimizationObjective(opt_);
        }
        mc_ = MotionCompare(opt_, pdef_);
        q_ = BinaryHeap<Motion *, MotionCompare>(mc_);
    }
    else
    {
        OMPL_INFORM("%s: problem definition is not set, deferring setup completion...", getName().c_str());
        setup_ = false;
    }

    // Calculate some constants:
    calculateRewiringLowerBounds();

    // Set the best cost to infinity
    bestCost_ = opt_->infiniteCost();
}

void ompl::geometric::RRTXstatic::clear()
{
    setup_ = false;
    Planner::clear();
    sampler_.reset();
    infSampler_.reset();
    freeMemory();
    if (nn_)
        nn_->clear();

    lastGoalMotion_ = nullptr;
    goalMotions_.clear();

    iterations_ = 0;
    bestCost_ = base::Cost(std::numeric_limits<double>::quiet_NaN());
}

ompl::base::PlannerStatus ompl::geometric::RRTXstatic::solve(const base::PlannerTerminationCondition &ptc)
{
    checkValidity();
    base::Goal *goal = pdef_->getGoal().get();
    auto *goal_s = dynamic_cast<base::GoalSampleableRegion *>(goal);

    // Check if there are more starts
    if (pis_.haveMoreStartStates() == true)
    {
        // There are, add them
        while (const base::State *st = pis_.nextStart())
        {
            auto *motion = new Motion(si_);
            si_->copyState(motion->state, st);
            motion->cost = opt_->identityCost();
            nn_->add(motion);
        }

        // And assure that, if we're using an informed sampler, it's reset
        infSampler_.reset();
    }
    // No else

    if (nn_->size() == 0)
    {
        OMPL_ERROR("%s: There are no valid initial states!", getName().c_str());
        return base::PlannerStatus::INVALID_START;
    }

    // Allocate a sampler if necessary
    if (!sampler_ && !infSampler_)
    {
        allocSampler();
    }

    OMPL_INFORM("%s: Starting planning with %u states already in datastructure", getName().c_str(), nn_->size());

    if (!si_->getStateSpace()->isMetricSpace())
        OMPL_WARN("%s: The state space (%s) is not metric and as a result the optimization objective may not satisfy "
                  "the triangle inequality. "
                  "You may need to disable rejection.",
                  getName().c_str(), si_->getStateSpace()->getName().c_str());

    const base::ReportIntermediateSolutionFn intermediateSolutionCallback = pdef_->getIntermediateSolutionCallback();

    Motion *solution = lastGoalMotion_;

    Motion *approximation = nullptr;
    double approximatedist = std::numeric_limits<double>::infinity();
    bool sufficientlyShort = false;

    auto *rmotion = new Motion(si_);
    base::State *rstate = rmotion->state;
    base::State *xstate = si_->allocState();
    base::State *dstate;

    Motion *motion;
    Motion *nmotion;
    Motion *nb;
    Motion *min;
    Motion *c;
    bool feas;

    unsigned int rewireTest = 0;
    unsigned int statesGenerated = 0;

    base::Cost incCost, cost;

    if (solution)
        OMPL_INFORM("%s: Starting planning with existing solution of cost %.5f", getName().c_str(),
                    solution->cost.value());

    if (useKNearest_)
        OMPL_INFORM("%s: Initial k-nearest value of %u", getName().c_str(),
                    (unsigned int)std::ceil(k_rrt_ * log((double)(nn_->size() + 1u))));
    else
        OMPL_INFORM(
            "%s: Initial rewiring radius of %.2f", getName().c_str(),
            std::min(maxDistance_, r_rrt_ * std::pow(log((double)(nn_->size() + 1u)) / ((double)(nn_->size() + 1u)),
                                                     1 / (double)(si_->getStateDimension()))));

    while (ptc == false)
    {
        iterations_++;

        // Computes the RRG values for this iteration (number or radius of neighbors)
        calculateRRG();

        // sample random state (with goal biasing)
        // Goal samples are only sampled until maxSampleCount() goals are in the tree, to prohibit duplicate goal
        // states.
        if (goal_s && goalMotions_.size() < goal_s->maxSampleCount() && rng_.uniform01() < goalBias_ &&
            goal_s->canSample())
            goal_s->sampleGoal(rstate);
        else
        {
            // Attempt to generate a sample, if we fail (e.g., too many rejection attempts), skip the remainder of this
            // loop and return to try again
            if (!sampleUniform(rstate))
                continue;
        }

        // find closest state in the tree
        nmotion = nn_->nearest(rmotion);

        if (intermediateSolutionCallback && si_->equalStates(nmotion->state, rstate))
            continue;

        dstate = rstate;

        // find state to add to the tree
        double d = si_->distance(nmotion->state, rstate);
        if (d > maxDistance_)
        {
            si_->getStateSpace()->interpolate(nmotion->state, rstate, maxDistance_ / d, xstate);
            dstate = xstate;
        }

        // Check if the motion between the nearest state and the state to add is valid
        if (si_->checkMotion(nmotion->state, dstate))
        {
            // create a motion
            motion = new Motion(si_);
            si_->copyState(motion->state, dstate);
            motion->parent = nmotion;
            incCost = opt_->motionCost(nmotion->state, motion->state);
            motion->cost = opt_->combineCosts(nmotion->cost, incCost);

            // Find nearby neighbors of the new motion
            getNeighbors(motion);

            // find which one we connect the new state to
            for (auto it = motion->nbh.begin(); it != motion->nbh.end();)
            {
                nb = it->first;
                feas = it->second;

                // Compute cost using nb as a parent
                incCost = opt_->motionCost(nb->state, motion->state);
                cost = opt_->combineCosts(nb->cost, incCost);
                if (opt_->isCostBetterThan(cost, motion->cost))
                {
                    // Check range and feasibility
                    if ((!useKNearest_ || distanceFunction(motion, nb) < maxDistance_) &&
                        si_->checkMotion(nb->state, motion->state))
                    {
                        // mark that the motion has been checked as valid
                        it->second = true;

                        motion->cost = cost;
                        motion->parent = nb;
                        ++it;
                    }
                    else
                    {
                        // Remove the infeasible neighbor from the list of neighbors
                        it = motion->nbh.erase(it);
                    }
                }
                else
                {
                    ++it;
                }
            }

            // Check if the vertex should be included
            if (!includeVertex(motion))
            {
                si_->freeState(motion->state);
                delete motion;
                continue;
            }

            // Update the neighbors' own neighbor data structures
            for (auto it = motion->nbh.begin(); it != motion->nbh.end(); ++it)
            {
                it->first->nbh.emplace_back(motion, it->second);
            }

            // add motion to the tree
            ++statesGenerated;
            nn_->add(motion);
            if (updateChildren_)
                motion->parent->children.push_back(motion);

            // add the new motion to the queue to propagate the changes
            updateQueue(motion);

            bool checkForSolution = false;

            // Add the new motion to the goalMotion_ list, if it satisfies the goal
            double distanceFromGoal;
            if (goal->isSatisfied(motion->state, &distanceFromGoal))
            {
                goalMotions_.push_back(motion);
                checkForSolution = true;
            }

            // Process the elements in the queue and rewire the tree until epsilon-optimality
            while (!q_.empty())
            {
                // Get element to update
                min = q_.top()->data;
                // Remove element from the queue and NULL the handle so that we know it's not in the queue anymore
                q_.pop();
                min->handle = nullptr;

                // Stop cost propagation if it is not in the relevant region
                if (opt_->isCostBetterThan(bestCost_, mc_.costPlusHeuristic(min)))
                    break;

                // Try min as a parent to optimize each neighbor
                for (auto it = min->nbh.begin(); it != min->nbh.end();)
                {
                    nb = it->first;
                    feas = it->second;

                    // Neighbor culling: removes neighbors farther than the neighbor radius
                    if ((!useKNearest_ || min->nbh.size() > rrg_k_) && distanceFunction(min, nb) > rrg_r_)
                    {
                        it = min->nbh.erase(it);
                        continue;
                    }

                    // Calculate cost of nb using min as a parent
                    incCost = opt_->motionCost(min->state, nb->state);
                    cost = opt_->combineCosts(min->cost, incCost);

                    // If cost improvement is better than epsilon
                    if (opt_->isCostBetterThan(opt_->combineCosts(cost, epsilonCost_), nb->cost))
                    {
                        if (nb->parent != min)
                        {
                            // changing parent, check feasibility
                            if (!feas)
                            {
                                feas = si_->checkMotion(nb->state, min->state);
                                if (!feas)
                                {
                                    // Remove the infeasible neighbor from the list of neighbors
                                    it = min->nbh.erase(it);
                                    continue;
                                }
                                else
                                {
                                    // mark that the motion has been checked as valid
                                    it->second = true;
                                }
                            }
                            if (updateChildren_)
                            {
                                // Remove this node from its parent's child list
                                removeFromParent(nb);
                                // add it as a child of min
                                min->children.push_back(nb);
                            }
                            // Add this node to the new parent
                            nb->parent = min;
                            ++rewireTest;
                        }
                        nb->cost = cost;

                        // Add to the queue for more improvements
                        updateQueue(nb);

                        checkForSolution = true;
                    }
                    ++it;
                }
                if (updateChildren_)
                {
                    // Propagation of the cost to the children
                    for (auto it = min->children.begin(), end = min->children.end(); it != end; ++it)
                    {
                        c = *it;
                        incCost = opt_->motionCost(min->state, c->state);
                        cost = opt_->combineCosts(min->cost, incCost);
                        c->cost = cost;
                        // Add to the queue for more improvements
                        updateQueue(c);

                        checkForSolution = true;
                    }
                }
            }

            // empty q and reset handles
            while (!q_.empty())
            {
                q_.top()->data->handle = nullptr;
                q_.pop();
            }
            q_.clear();

            // Checking for solution or iterative improvement
            if (checkForSolution)
            {
                bool updatedSolution = false;
                for (auto &goalMotion : goalMotions_)
                {
                    if (opt_->isCostBetterThan(goalMotion->cost, bestCost_))
                    {
                        if (opt_->isFinite(bestCost_) == false)
                        {
                            OMPL_INFORM("%s: Found an initial solution with a cost of %.2f in %u iterations (%u "
                                        "vertices in the graph)",
                                        getName().c_str(), goalMotion->cost.value(), iterations_, nn_->size());
                        }
                        bestCost_ = goalMotion->cost;
                        updatedSolution = true;
                    }

                    sufficientlyShort = opt_->isSatisfied(goalMotion->cost);
                    if (sufficientlyShort)
                    {
                        solution = goalMotion;
                        break;
                    }
                    else if (!solution || opt_->isCostBetterThan(goalMotion->cost, solution->cost))
                    {
                        solution = goalMotion;
                        updatedSolution = true;
                    }
                }

                if (updatedSolution)
                {
                    if (intermediateSolutionCallback)
                    {
                        std::vector<const base::State *> spath;
                        Motion *intermediate_solution =
                            solution->parent;  // Do not include goal state to simplify code.

                        // Push back until we find the start, but not the start itself
                        while (intermediate_solution->parent != nullptr)
                        {
                            spath.push_back(intermediate_solution->state);
                            intermediate_solution = intermediate_solution->parent;
                        }

                        intermediateSolutionCallback(this, spath, bestCost_);
                    }
                }
            }

            // Checking for approximate solution (closest state found to the goal)
            if (goalMotions_.size() == 0 && distanceFromGoal < approximatedist)
            {
                approximation = motion;
                approximatedist = distanceFromGoal;
            }
        }

        // terminate if a sufficient solution is found
        if (solution && sufficientlyShort)
            break;
    }

    bool approximate = (solution == nullptr);
    bool addedSolution = false;
    if (approximate)
        solution = approximation;
    else
        lastGoalMotion_ = solution;

    if (solution != nullptr)
    {
        ptc.terminate();
        // construct the solution path
        std::vector<Motion *> mpath;
        while (solution != nullptr)
        {
            mpath.push_back(solution);
            solution = solution->parent;
        }

        // set the solution path
        auto path = std::make_shared<PathGeometric>(si_);
        for (int i = mpath.size() - 1; i >= 0; --i)
            path->append(mpath[i]->state);

        // Add the solution path.
        base::PlannerSolution psol(path);
        psol.setPlannerName(getName());
        if (approximate)
            psol.setApproximate(approximatedist);
        // Does the solution satisfy the optimization objective?
        psol.setOptimized(opt_, bestCost_, sufficientlyShort);
        pdef_->addSolutionPath(psol);

        addedSolution = true;
    }

    si_->freeState(xstate);
    if (rmotion->state)
        si_->freeState(rmotion->state);
    delete rmotion;

    OMPL_INFORM("%s: Created %u new states. Checked %u rewire options. %u goal states in tree. Final solution cost "
                "%.3f",
                getName().c_str(), statesGenerated, rewireTest, goalMotions_.size(), bestCost_.value());

    return {addedSolution, approximate};
}

void ompl::geometric::RRTXstatic::updateQueue(Motion *x)
{
    // If x->handle is not NULL, x is already in the queue and needs to be updated, otherwise it is inserted
    if (x->handle != nullptr)
    {
        q_.update(x->handle);
    }
    else
    {
        x->handle = q_.insert(x);
    }
}

void ompl::geometric::RRTXstatic::removeFromParent(Motion *m)
{
    for (auto it = m->parent->children.begin(); it != m->parent->children.end(); ++it)
    {
        if (*it == m)
        {
            m->parent->children.erase(it);
            break;
        }
    }
}

void ompl::geometric::RRTXstatic::calculateRRG()
{
    auto cardDbl = static_cast<double>(nn_->size() + 1u);
    rrg_k_ = std::ceil(k_rrt_ * log(cardDbl));
    rrg_r_ = std::min(maxDistance_,
                      r_rrt_ * std::pow(log(cardDbl) / cardDbl, 1 / static_cast<double>(si_->getStateDimension())));
}

void ompl::geometric::RRTXstatic::getNeighbors(Motion *motion) const
{
    if (motion->nbh.size() > 0)
    {
        return;
    }

    std::vector<Motion *> nbh;
    if (useKNearest_)
    {
        //- k-nearest RRT*
        nn_->nearestK(motion, rrg_k_, nbh);
    }
    else
    {
        nn_->nearestR(motion, rrg_r_, nbh);
    }

    motion->nbh.resize(nbh.size());
    std::transform(nbh.begin(), nbh.end(), motion->nbh.begin(),
                   [](Motion *m) { return std::pair<Motion *, bool>(m, false); });
}

bool ompl::geometric::RRTXstatic::includeVertex(const Motion *x) const
{
    switch (variant_)
    {
        case 1:
            return opt_->isCostBetterThan(mc_.alphaCostPlusHeuristic(x, alpha_), opt_->infiniteCost());  // Always true?
        case 2:
            return opt_->isCostBetterThan(mc_.alphaCostPlusHeuristic(x->parent, alpha_), bestCost_);
        case 3:
            return opt_->isCostBetterThan(mc_.alphaCostPlusHeuristic(x, alpha_), bestCost_);
        default:  // no rejection
            return true;
    }
}

void ompl::geometric::RRTXstatic::freeMemory()
{
    if (nn_)
    {
        std::vector<Motion *> motions;
        nn_->list(motions);
        for (auto &motion : motions)
        {
            if (motion->state)
                si_->freeState(motion->state);
            delete motion;
        }
    }
}

void ompl::geometric::RRTXstatic::getPlannerData(base::PlannerData &data) const
{
    Planner::getPlannerData(data);

    std::vector<Motion *> motions;
    if (nn_)
        nn_->list(motions);

    if (lastGoalMotion_)
        data.addGoalVertex(base::PlannerDataVertex(lastGoalMotion_->state));

    for (auto &motion : motions)
    {
        if (motion->parent == nullptr)
            data.addStartVertex(base::PlannerDataVertex(motion->state));
        else
            data.addEdge(base::PlannerDataVertex(motion->parent->state), base::PlannerDataVertex(motion->state));
    }
}

void ompl::geometric::RRTXstatic::setInformedSampling(bool informedSampling)
{
    if (static_cast<bool>(opt_) == true)
    {
        if (opt_->hasCostToGoHeuristic() == false)
        {
            OMPL_INFORM("%s: No cost-to-go heuristic set. Informed techniques will not work well.", getName().c_str());
        }
    }

    // This option is mutually exclusive with setSampleRejection, assert that:
    if (informedSampling == true && useRejectionSampling_ == true)
    {
        OMPL_ERROR("%s: InformedSampling and SampleRejection are mutually exclusive options.", getName().c_str());
    }

    // Check if we're changing the setting of informed sampling. If we are, we will need to create a new sampler, which
    // we only want to do if one is already allocated.
    if (informedSampling != useInformedSampling_)
    {
        // Store the value
        useInformedSampling_ = informedSampling;

        // If we currently have a sampler, we need to make a new one
        if (sampler_ || infSampler_)
        {
            // Reset the samplers
            sampler_.reset();
            infSampler_.reset();

            // Create the sampler
            allocSampler();
        }
    }
}

void ompl::geometric::RRTXstatic::setSampleRejection(bool reject)
{
    if (static_cast<bool>(opt_) == true)
    {
        if (opt_->hasCostToGoHeuristic() == false)
        {
            OMPL_INFORM("%s: No cost-to-go heuristic set. Informed techniques will not work well.", getName().c_str());
        }
    }

    // This option is mutually exclusive with setInformedSampling, assert that:
    if (reject == true && useInformedSampling_ == true)
    {
        OMPL_ERROR("%s: InformedSampling and SampleRejection are mutually exclusive options.", getName().c_str());
    }

    // Check if we're changing the setting of rejection sampling. If we are, we will need to create a new sampler, which
    // we only want to do if one is already allocated.
    if (reject != useRejectionSampling_)
    {
        // Store the setting
        useRejectionSampling_ = reject;

        // If we currently have a sampler, we need to make a new one
        if (sampler_ || infSampler_)
        {
            // Reset the samplers
            sampler_.reset();
            infSampler_.reset();

            // Create the sampler
            allocSampler();
        }
    }
}

void ompl::geometric::RRTXstatic::allocSampler()
{
    // Allocate the appropriate type of sampler.
    if (useInformedSampling_)
    {
        // We are using informed sampling, this can end up reverting to rejection sampling in some cases
        OMPL_INFORM("%s: Using informed sampling.", getName().c_str());
        infSampler_ = opt_->allocInformedStateSampler(pdef_, numSampleAttempts_);
    }
    else if (useRejectionSampling_)
    {
        // We are explicitly using rejection sampling.
        OMPL_INFORM("%s: Using rejection sampling.", getName().c_str());
        infSampler_ = std::make_shared<base::RejectionInfSampler>(pdef_, numSampleAttempts_);
    }
    else
    {
        // We are using a regular sampler
        sampler_ = si_->allocStateSampler();
    }
}

bool ompl::geometric::RRTXstatic::sampleUniform(base::State *statePtr)
{
    // Use the appropriate sampler
    if (useInformedSampling_ || useRejectionSampling_)
    {
        // Attempt the focused sampler and return the result.
        // If bestCost is changing a lot by small amounts, this could
        // be prunedCost_ to reduce the number of times the informed sampling
        // transforms are recalculated.
        return infSampler_->sampleUniform(statePtr, bestCost_);
    }
    else
    {
        // Simply return a state from the regular sampler
        sampler_->sampleUniform(statePtr);

        // Always true
        return true;
    }
}

void ompl::geometric::RRTXstatic::calculateRewiringLowerBounds()
{
    auto dimDbl = static_cast<double>(si_->getStateDimension());

    // k_rrt > 2^(d + 1) * e * (1 + 1 / d).  K-nearest RRT*
    k_rrt_ = rewireFactor_ * (std::pow(2, dimDbl + 1) * boost::math::constants::e<double>() * (1.0 + 1.0 / dimDbl));

    // r_rrt > (2*(1+1/d))^(1/d)*(measure/ballvolume)^(1/d)
    r_rrt_ = rewireFactor_ *
             std::pow(2 * (1.0 + 1.0 / dimDbl) * (si_->getSpaceMeasure() / unitNBallMeasure(si_->getStateDimension())),
                      1.0 / dimDbl);
}
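
A minimal usage sketch (not part of RRTXstatic.cpp, added for illustration): it plans with RRTXstatic in a 2D RealVectorStateSpace through ompl::geometric::SimpleSetup. The bounds, start/goal values, planner settings, and the always-valid state validity checker are placeholder assumptions; a real application would supply its own.

#include <iostream>
#include <memory>

#include "ompl/base/spaces/RealVectorStateSpace.h"
#include "ompl/geometric/SimpleSetup.h"
#include "ompl/geometric/planners/rrt/RRTXstatic.h"

int main()
{
    namespace ob = ompl::base;
    namespace og = ompl::geometric;

    // Plan in a 2D box [0, 1] x [0, 1]
    auto space = std::make_shared<ob::RealVectorStateSpace>(2);
    space->setBounds(0.0, 1.0);

    og::SimpleSetup ss(space);
    // Placeholder validity checker: accepts every state (no obstacles)
    ss.setStateValidityChecker([](const ob::State *) { return true; });

    ob::ScopedState<> start(space), goal(space);
    start[0] = start[1] = 0.1;
    goal[0] = goal[1] = 0.9;
    ss.setStartAndGoalStates(start, goal);

    // Use RRTXstatic as the planner and tune a couple of its parameters
    auto planner = std::make_shared<og::RRTXstatic>(ss.getSpaceInformation());
    planner->setRange(0.2);      // maximum length of a tree edge
    planner->setGoalBias(0.05);  // probability of sampling the goal region
    ss.setPlanner(planner);

    // Plan for one second and print the best path found
    if (ss.solve(1.0))
        ss.getSolutionPath().print(std::cout);
    return 0;
}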