#include <ompl/base/State.h>
#include <ompl/base/spaces/DubinsStateSpace.h>
#include <ompl/base/spaces/ReedsSheppStateSpace.h>
#include <ompl/base/spaces/SE2StateSpace.h>

#include <boost/functional/hash.hpp>
#include <boost/geometry.hpp>
#include <boost/geometry/algorithms/intersection.hpp>
#include <boost/geometry/geometries/linestring.hpp>
#include <boost/geometry/geometries/point_xy.hpp>
#include <boost/heap/fibonacci_heap.hpp>
#include <unordered_map>
#include <unordered_set>

#include "neighbor.hpp"
#include "planresult.hpp"

namespace Constants {
// ... (r, the vehicle's minimum turning radius [m], is defined in the omitted
//      lines above)
// [rad] --- yaw change of one turning motion primitive
static float deltat = 6.75 * 6 / 180.0 * M_PI;
// [#] --- cost multiplier for turning primitives
static float penaltyTurning = 1.5;
// [#] --- cost multiplier for reversing primitives
static float penaltyReversing = 2.0;
// [#] --- cost multiplier for a change of driving direction
static float penaltyCOD = 2.0;
// [m] --- resolution of the 2D holonomic cost map
static float mapResolution = 2.0;
// resolutions used by calcIndex() to discretize states
static float xyResolution = r * deltat;
static float yawResolution = deltat;
// [m] --- vehicle width
static float carWidth = 2.0;
// [m] --- distance from rear to vehicle front end
static float LF = 2.0;
// [m] --- distance from rear to vehicle back end
static float LB = 1.0;
// [m] --- default obstacle radius
static float obsRadius = 1;
// least number of time steps to wait on a constraint
static int constraintWaitTime = 2;
// Body-frame displacements of the six motion primitives: one straight and two
// turning arcs, driven forward (0-2) and backward (3-5).
std::vector<double> dyaw = {0, deltat, -deltat, 0, -deltat, deltat};
std::vector<double> dx = {r * deltat,  r * sin(deltat),  r * sin(deltat),
                          -r * deltat, -r * sin(deltat), -r * sin(deltat)};
std::vector<double> dy = {0, -r * (1 - cos(deltat)), r * (1 - cos(deltat)),
                          0, -r * (1 - cos(deltat)), r * (1 - cos(deltat))};
// wrap a heading angle into the range [0, 2*pi)
static inline float normalizeHeadingRad(float t) {
  if (t < 0) {
    t = t - 2.f * M_PI * (int)(t / (2.f * M_PI));
    return 2.f * M_PI + t;
  }
  return t - 2.f * M_PI * (int)(t / (2.f * M_PI));
}
}  // namespace Constants
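
// Illustrative sketch (not part of environment.hpp): how the primitive tables
// above are used. dx/dy are body-frame displacements, so a successor pose is
// obtained by rotating them into the world frame (the same -yaw rotation used
// by getNeighbors()/generatePath() below) and wrapping the heading with
// normalizeHeadingRad(). Pose and applyPrimitive are hypothetical names.
//
//   struct Pose { double x, y, yaw; };
//
//   inline Pose applyPrimitive(const Pose &s, int act) {
//     Pose n;
//     n.x = s.x + Constants::dx[act] * cos(-s.yaw) -
//           Constants::dy[act] * sin(-s.yaw);
//     n.y = s.y + Constants::dx[act] * sin(-s.yaw) +
//           Constants::dy[act] * cos(-s.yaw);
//     n.yaw = Constants::normalizeHeadingRad(s.yaw + Constants::dyaw[act]);
//     return n;
//   }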
typedef ompl::base::SE2StateSpace::StateType OmplState;
typedef boost::geometry::model::d2::point_xy<double> Point;
typedef boost::geometry::model::segment<Point> Segment;
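
// Illustrative sketch (not part of environment.hpp): Point and Segment come
// from Boost.Geometry, which is presumably what the State collision predicates
// build on. A segment-segment overlap test looks like this; segmentsOverlap is
// a hypothetical helper, boost::geometry::intersects is the real API.
//
//   namespace bg = boost::geometry;
//
//   inline bool segmentsOverlap(const Point &a0, const Point &a1,
//                               const Point &b0, const Point &b1) {
//     return bg::intersects(Segment(a0, a1), Segment(b0, b1));
//   }
//
//   // e.g. segmentsOverlap(Point(0, 0), Point(2, 2), Point(0, 2), Point(2, 0))
//   // returns true: the two diagonals cross at (1, 1).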
/// Environment class
template <typename Location, typename State, typename Action, typename Cost,
          typename Conflict, typename Constraint, typename Constraints>
class Environment {
 public:
  Environment(size_t maxx, size_t maxy, std::unordered_set<Location> obstacles,
              std::multimap<int, State> dynamic_obstacles,
              std::vector<State> goals)
      : m_obstacles(std::move(obstacles)),
        m_dynamic_obstacles(std::move(dynamic_obstacles)),
        m_constraints(nullptr),
        m_lastGoalConstraint(-1),
        m_highLevelExpanded(0),
        m_lowLevelExpanded(0) {
    m_dimx = (int)maxx / Constants::mapResolution;
    m_dimy = (int)maxy / Constants::mapResolution;
    holonomic_cost_maps = std::vector<std::vector<std::vector<double>>>(
        goals.size(), std::vector<std::vector<double>>(
                          m_dimx, std::vector<double>(m_dimy, 0)));
    for (const auto &g : goals) {
      if (g.x < 0 || g.x > maxx || g.y < 0 || g.y > maxy) {
        std::cout << "\033[1m\033[31m Goal out of boundary, Fail to build "
                     "environment \033[0m\n";
        // ...
      }
      m_goals.emplace_back(
          State(g.x, g.y, Constants::normalizeHeadingRad(g.yaw)));
    }
    updateCostmap();
  }
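
  // Illustrative sketch (not part of environment.hpp): constructing the
  // environment. Location/State/Action/Conflict/Constraint/Constraints stand
  // for the project's own template arguments; the sketch assumes a State can
  // be built from (x, y, yaw), as above, and a Location from (x, y). The map
  // size and coordinates below are made up.
  //
  //   std::unordered_set<Location> obstacles = {Location(10, 10),
  //                                             Location(24, 6)};
  //   std::multimap<int, State> dynamicObstacles;   // keyed by time step
  //   std::vector<State> goals = {State(40, 40, 0), State(8, 36, M_PI / 2)};
  //
  //   Environment<Location, State, Action, double, Conflict, Constraint,
  //               Constraints>
  //       env(50, 50, obstacles, dynamicObstacles, goals);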
  /// High Level Environment functions
  bool getFirstConflict(
      const std::vector<PlanResult<State, Action, double>> &solution,
      Conflict &result) {
    int max_t = 0;
    for (const auto &sol : solution) {
      max_t = std::max<int>(max_t, sol.states.size() - 1);
    }
    for (int t = 0; t < max_t; ++t) {
      // check agent-agent collisions at time t
      for (size_t i = 0; i < solution.size(); ++i) {
        State state1 = getState(i, solution, t);
        for (size_t j = i + 1; j < solution.size(); ++j) {
          State state2 = getState(j, solution, t);
          if (state1.agentCollision(state2)) {
            // ... (record time, agents i and j, and their states in result)
            return true;
          }
        }
      }
    }
    return false;
  }
  void createConstraintsFromConflict(
      const Conflict &conflict, std::map<size_t, Constraints> &constraints) {
    Constraints c1;
    c1.constraints.emplace(
        Constraint(conflict.time, conflict.s2, conflict.agent2));
    constraints[conflict.agent1] = c1;
    Constraints c2;
    c2.constraints.emplace(
        Constraint(conflict.time, conflict.s1, conflict.agent1));
    constraints[conflict.agent2] = c2;
  }
  void onExpandHighLevelNode(int) {
    m_highLevelExpanded++;
    if (m_highLevelExpanded % 50 == 0)
      std::cout << "Now expand " << m_highLevelExpanded
                << " high level nodes.\n";
  }
  /// Low Level Environment functions
  void setLowLevelContext(size_t agentIdx, const Constraints *constraints) {
    m_agentIdx = agentIdx;
    m_constraints = constraints;
    m_lastGoalConstraint = -1;
    for (const auto &c : constraints->constraints) {
      if (m_goals[m_agentIdx].agentCollision(c.s)) {
        m_lastGoalConstraint = std::max(m_lastGoalConstraint, c.time);
      }
    }
  }
  int admissibleHeuristic(const State &s) {
    // non-holonomic-without-obstacles heuristic: Reeds-Shepp distance
    ompl::base::ReedsSheppStateSpace reedsSheppPath(Constants::r);
    OmplState *rsStart = (OmplState *)reedsSheppPath.allocState();
    OmplState *rsEnd = (OmplState *)reedsSheppPath.allocState();
    rsStart->setXY(s.x, s.y);
    rsStart->setYaw(s.yaw);
    rsEnd->setXY(m_goals[m_agentIdx].x, m_goals[m_agentIdx].y);
    rsEnd->setYaw(m_goals[m_agentIdx].yaw);
    double reedsSheppCost = reedsSheppPath.distance(rsStart, rsEnd);
    // Euclidean distance to the goal
    double euclideanCost = sqrt(pow(m_goals[m_agentIdx].x - s.x, 2) +
                                pow(m_goals[m_agentIdx].y - s.y, 2));
    // holonomic-with-obstacles heuristic from the precomputed 2D cost map
    double twoDoffset =
        sqrt(pow((s.x - (int)s.x) -
                     (m_goals[m_agentIdx].x - (int)m_goals[m_agentIdx].x),
                 2) +
             pow((s.y - (int)s.y) -
                     (m_goals[m_agentIdx].y - (int)m_goals[m_agentIdx].y),
                 2));
    double twoDCost =
        holonomic_cost_maps[m_agentIdx][(int)s.x / Constants::mapResolution]
                           [(int)s.y / Constants::mapResolution] -
        twoDoffset;

    return std::max({reedsSheppCost, euclideanCost, twoDCost});
  }
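
  // Illustrative sketch (not part of environment.hpp): the Reeds-Shepp term
  // above uses OMPL's ReedsSheppStateSpace. A standalone distance query, with
  // the allocated states freed afterwards, could look like this;
  // reedsSheppDistance is a hypothetical helper.
  //
  //   double reedsSheppDistance(double x0, double y0, double yaw0,
  //                             double x1, double y1, double yaw1,
  //                             double radius) {
  //     ompl::base::ReedsSheppStateSpace space(radius);
  //     OmplState *a = (OmplState *)space.allocState();
  //     OmplState *b = (OmplState *)space.allocState();
  //     a->setXY(x0, y0);
  //     a->setYaw(yaw0);
  //     b->setXY(x1, y1);
  //     b->setYaw(yaw1);
  //     double d = space.distance(a, b);
  //     space.freeState(a);
  //     space.freeState(b);
  //     return d;
  //   }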
  bool isSolution(
      const State &state, double gscore,
      std::unordered_map<State, std::tuple<State, Action, double, double>,
                         std::hash<State>> &_camefrom) {
    double goal_distance =
        sqrt(pow(state.x - getGoal().x, 2) + pow(state.y - getGoal().y, 2));
    if (goal_distance > 3 * (Constants::LB + Constants::LF)) return false;
    // analytic expansion: try to connect this state to the goal with a
    // Reeds-Shepp curve
    ompl::base::ReedsSheppStateSpace reedsSheppSpace(Constants::r);
    OmplState *rsStart = (OmplState *)reedsSheppSpace.allocState();
    OmplState *rsEnd = (OmplState *)reedsSheppSpace.allocState();
    rsStart->setXY(state.x, state.y);
    rsStart->setYaw(-state.yaw);
    rsEnd->setXY(getGoal().x, getGoal().y);
    rsEnd->setYaw(-getGoal().yaw);
    ompl::base::ReedsSheppStateSpace::ReedsSheppPath reedsShepppath =
        reedsSheppSpace.reedsShepp(rsStart, rsEnd);

    std::vector<State> path;
    std::unordered_map<State, std::tuple<State, Action, double, double>,
                       std::hash<State>>
        cameFrom;
    path.emplace_back(state);
    // convert each of the (up to five) Reeds-Shepp segments into discrete
    // states via generatePath()
    for (auto pathidx = 0; pathidx < 5; pathidx++) {
      if (fabs(reedsShepppath.length_[pathidx]) < 1e-6) continue;
      double deltat = 0, dx = 0, act = 0, cost = 0;
      switch (reedsShepppath.type_[pathidx]) {
        case 1:  // RS_LEFT
          deltat = -reedsShepppath.length_[pathidx];
          dx = Constants::r * sin(-deltat);
          // ...
          cost = reedsShepppath.length_[pathidx] * Constants::r *
                 Constants::penaltyTurning;
          break;
        case 2:  // RS_STRAIGHT
          // ...
          dx = reedsShepppath.length_[pathidx] * Constants::r;
          break;
        case 3:  // RS_RIGHT
          deltat = reedsShepppath.length_[pathidx];
          dx = Constants::r * sin(deltat);
          // ...
          cost = reedsShepppath.length_[pathidx] * Constants::r *
                 Constants::penaltyTurning;
          break;
        default:
          std::cout << "\033[1m\033[31m"
                    << "Warning: Receive unknown ReedsSheppPath type"
                    << "\033[0m\n";
          break;
      }
      if (cost < 0) {  // a negative segment length means reversing
        cost = -cost * Constants::penaltyReversing;
        // ...
      }
      State s = path.back();
      std::vector<std::pair<State, double>> next_path;
      if (generatePath(s, act, deltat, dx, next_path)) {
        for (auto iter = next_path.begin(); iter != next_path.end(); iter++) {
          State next_s = iter->first;
          gscore += iter->second;
          if (!(next_s == path.back())) {
            cameFrom.insert(std::make_pair<>(
                next_s,
                std::make_tuple<>(path.back(), act, iter->second, gscore)));
          }
          path.emplace_back(next_s);
        }
      } else {
        return false;
      }
    }

    if (path.back().time <= m_lastGoalConstraint) {
      return false;
    }

    m_goals[m_agentIdx] = path.back();
    _camefrom.insert(cameFrom.begin(), cameFrom.end());
    return true;
  }
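
  // Illustrative sketch (not part of environment.hpp): what the loop above
  // iterates over. OMPL describes a Reeds-Shepp connection as up to five
  // segments, each with a type (left / straight / right / no-op) and a signed
  // length (negative means reversing), which is why each segment becomes one
  // call to generatePath().
  //
  //   ompl::base::ReedsSheppStateSpace space(Constants::r);
  //   // ... allocate and fill two OmplState poses a, b as above ...
  //   ompl::base::ReedsSheppStateSpace::ReedsSheppPath p = space.reedsShepp(a, b);
  //   for (int i = 0; i < 5; ++i) {
  //     if (std::fabs(p.length_[i]) < 1e-6) continue;  // unused slot
  //     std::cout << "segment " << i << ": type " << p.type_[i]
  //               << ", signed length " << p.length_[i] << "\n";
  //   }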
  void getNeighbors(const State &s, Action action,
                    std::vector<Neighbor<State, Action, double>> &neighbors) {
    for (Action act = 0; act < 6; act++) {  // the six motion primitives
      double xSucc, ySucc, yawSucc;
      double g = Constants::dx[0];
      xSucc = s.x + Constants::dx[act] * cos(-s.yaw) -
              Constants::dy[act] * sin(-s.yaw);
      ySucc = s.y + Constants::dx[act] * sin(-s.yaw) +
              Constants::dy[act] * cos(-s.yaw);
      yawSucc = Constants::normalizeHeadingRad(s.yaw + Constants::dyaw[act]);
      if (act % 3 != 0) {  // penalize turning
        g = g * Constants::penaltyTurning;
      }
      if ((act < 3 && action >= 3) || (action < 3 && act >= 3)) {
        // penalize a change of driving direction
        g = g * Constants::penaltyCOD;
      }
      if (act >= 3) {  // penalize reversing
        g = g * Constants::penaltyReversing;
      }
      State tempState(xSucc, ySucc, yawSucc, s.time + 1);
      if (stateValid(tempState)) {
        neighbors.emplace_back(
            Neighbor<State, Action, double>(tempState, act, g));
      }
    }
    // wait-in-place action
    State tempState(s.x, s.y, s.yaw, s.time + 1);
    if (stateValid(tempState)) {
      neighbors.emplace_back(
          Neighbor<State, Action, double>(tempState, 6, Constants::dx[0]));
    }
  }

  State getGoal() { return m_goals[m_agentIdx]; }
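
  // Worked example for the step costs above: expanding act = 4 (a backward
  // arc) from a node reached by a forward primitive (action < 3) multiplies
  // the base step cost Constants::dx[0] by penaltyTurning, penaltyCOD and
  // penaltyReversing, giving g = dx[0] * 1.5 * 2.0 * 2.0 = 6 * dx[0]; a plain
  // forward step (act = 0 with action < 3) keeps g = dx[0].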
  uint64_t calcIndex(const State &s) {
    return (uint64_t)s.time * (2 * M_PI / Constants::deltat) *
               (m_dimx / Constants::xyResolution) *
               (m_dimy / Constants::xyResolution) +
           (uint64_t)(Constants::normalizeHeadingRad(s.yaw) /
                      Constants::yawResolution) *
               (m_dimx / Constants::xyResolution) *
               (m_dimy / Constants::xyResolution) +
           (uint64_t)(s.y / Constants::xyResolution) *
               (m_dimx / Constants::xyResolution) +
           (uint64_t)(s.x / Constants::xyResolution);
  }
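
  // Illustrative sketch (not part of environment.hpp): calcIndex() buckets a
  // continuous state into (time step, yaw bin, y bin, x bin) and flattens the
  // tuple into one integer, so states falling into the same bucket are treated
  // as duplicates by the low-level search. The binning part alone is:
  //
  //   uint64_t xBin = (uint64_t)(s.x / Constants::xyResolution);
  //   uint64_t yBin = (uint64_t)(s.y / Constants::xyResolution);
  //   uint64_t yawBin = (uint64_t)(Constants::normalizeHeadingRad(s.yaw) /
  //                                Constants::yawResolution);
  //   // calcIndex() then combines them in row-major order:
  //   // time step first, then yawBin, then yBin, then xBin.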
  void onExpandLowLevelNode(const State &, int, int) { m_lowLevelExpanded++; }
  bool startAndGoalValid(const std::vector<State> &m_starts, const size_t iter,
                         const int batchsize) {
    assert(m_goals.size() == m_starts.size());
    for (size_t i = 0; i < m_goals.size(); i++)
      for (size_t j = i + 1; j < m_goals.size(); j++) {
        if (m_goals[i].agentCollision(m_goals[j])) {
          std::cout << "ERROR: Goal point of " << i + iter * batchsize << " & "
                    << j + iter * batchsize << " collide!\n";
          return false;
        }
        if (m_starts[i].agentCollision(m_starts[j])) {
          std::cout << "ERROR: Start point of " << i + iter * batchsize << " & "
                    << j + iter * batchsize << " collide!\n";
          return false;
        }
      }
    return true;
  }
 private:
  State getState(size_t agentIdx,
                 const std::vector<PlanResult<State, Action, double>> &solution,
                 size_t t) {
    assert(agentIdx < solution.size());
    if (t < solution[agentIdx].states.size()) {
      return solution[agentIdx].states[t].first;
    }
    assert(!solution[agentIdx].states.empty());
    return solution[agentIdx].states.back().first;
  }
  bool stateValid(const State &s) {
    double x_ind = s.x / Constants::mapResolution;
    double y_ind = s.y / Constants::mapResolution;
    if (x_ind < 0 || x_ind >= m_dimx || y_ind < 0 || y_ind >= m_dimy)
      return false;

    for (auto it = m_obstacles.begin(); it != m_obstacles.end(); it++) {
      if (s.obsCollision(*it)) return false;
    }

    auto it = m_dynamic_obstacles.equal_range(s.time);
    for (auto itr = it.first; itr != it.second; ++itr) {
      if (s.agentCollision(itr->second)) return false;
    }
    // dynamic obstacles stored under negative time keys are checked as well
    auto itlow = m_dynamic_obstacles.lower_bound(-s.time);
    auto itup = m_dynamic_obstacles.upper_bound(-1);
    for (auto it = itlow; it != itup; ++it)
      if (s.agentCollision(it->second)) return false;

    for (auto it = m_constraints->constraints.begin();
         it != m_constraints->constraints.end(); it++) {
      if (!it->satisfyConstraint(s)) return false;
    }

    return true;
  }
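
  // Illustrative sketch (not part of environment.hpp): dynamic obstacles are
  // kept in a std::multimap keyed by time step, so all obstacles active at a
  // given time are visited with equal_range, as above.
  //
  //   std::multimap<int, State> dyn;
  //   dyn.emplace(3, State(12.0, 7.5, 0.0));       // present at t = 3
  //   auto range = dyn.equal_range(3);
  //   for (auto it = range.first; it != range.second; ++it) {
  //     // collision-check the query state against it->second here
  //   }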
  struct compare_node {
    bool operator()(const std::pair<State, double> &n1,
                    const std::pair<State, double> &n2) const {
      return (n1.second > n2.second);
    }
  };
  // Dijkstra-like wavefront expansion from each goal over the 2D grid; fills
  // holonomic_cost_maps, which admissibleHeuristic() reads.
  void updateCostmap() {
    boost::heap::fibonacci_heap<std::pair<State, double>,
                                boost::heap::compare<compare_node>>
        heap;

    std::set<std::pair<int, int>> temp_obs_set;
    for (auto it = m_obstacles.begin(); it != m_obstacles.end(); it++) {
      temp_obs_set.insert(
          std::make_pair((int)it->x / Constants::mapResolution,
                         (int)it->y / Constants::mapResolution));
    }

    for (size_t idx = 0; idx < m_goals.size(); idx++) {
      heap.clear();
      int goal_x = (int)m_goals[idx].x / Constants::mapResolution;
      int goal_y = (int)m_goals[idx].y / Constants::mapResolution;
      heap.push(std::make_pair(State(goal_x, goal_y, 0), 0));

      while (!heap.empty()) {
        std::pair<State, double> node = heap.top();
        heap.pop();

        int x = node.first.x;
        int y = node.first.y;
        for (int dx = -1; dx <= 1; dx++)
          for (int dy = -1; dy <= 1; dy++) {
            if (dx == 0 && dy == 0) continue;
            int new_x = x + dx;
            int new_y = y + dy;
            if (new_x == goal_x && new_y == goal_y) continue;
            if (new_x >= 0 && new_x < m_dimx && new_y >= 0 && new_y < m_dimy &&
                holonomic_cost_maps[idx][new_x][new_y] == 0 &&
                temp_obs_set.find(std::make_pair(new_x, new_y)) ==
                    temp_obs_set.end()) {
              holonomic_cost_maps[idx][new_x][new_y] =
                  holonomic_cost_maps[idx][x][y] +
                  sqrt(pow(dx * Constants::mapResolution, 2) +
                       pow(dy * Constants::mapResolution, 2));
              heap.push(std::make_pair(State(new_x, new_y, 0),
                                       holonomic_cost_maps[idx][new_x][new_y]));
            }
          }
      }
    }
  }
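
  // Illustrative sketch (not part of environment.hpp): compare_node orders
  // pairs by descending cost, and boost::heap::fibonacci_heap pops its
  // "largest" element first, so the combination behaves as a min-cost queue:
  // heap.top() is always the cheapest unexpanded cell, which the wavefront
  // expansion above relies on. Minimal standalone version (byCost is a
  // hypothetical name):
  //
  //   struct byCost {
  //     bool operator()(const std::pair<int, double> &a,
  //                     const std::pair<int, double> &b) const {
  //       return a.second > b.second;
  //     }
  //   };
  //   boost::heap::fibonacci_heap<std::pair<int, double>,
  //                               boost::heap::compare<byCost>>
  //       q;
  //   q.push({7, 2.5});
  //   q.push({3, 1.0});
  //   // q.top().first == 3: the entry with the lowest cost comes out first.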
  bool generatePath(State startState, int act, double deltaSteer,
                    double deltaLength,
                    std::vector<std::pair<State, double>> &result) {
    double xSucc, ySucc, yawSucc, dx, dy, dyaw, ratio;
    result.emplace_back(std::make_pair<>(startState, 0));
    if (act == 0 || act == 3) {  // straight segment, forward or backward
      for (size_t i = 0; i < (size_t)(deltaLength / Constants::dx[act]); i++) {
        State s = result.back().first;
        xSucc = s.x + Constants::dx[act] * cos(-s.yaw) -
                Constants::dy[act] * sin(-s.yaw);
        ySucc = s.y + Constants::dx[act] * sin(-s.yaw) +
                Constants::dy[act] * cos(-s.yaw);
        yawSucc = Constants::normalizeHeadingRad(s.yaw + Constants::dyaw[act]);
        State nextState(xSucc, ySucc, yawSucc, result.back().first.time + 1);
        if (!stateValid(nextState)) return false;
        result.emplace_back(std::make_pair<>(nextState, Constants::dx[0]));
      }
      // residual fraction of one straight primitive
      ratio = (deltaLength - (size_t)(deltaLength / Constants::dx[act]) *
                                 Constants::dx[act]) /
              Constants::dx[act];
      dyaw = 0;
      dx = ratio * Constants::dx[act];
      dy = 0;
    } else {  // arc segment
      for (size_t i = 0; i < (size_t)(deltaSteer / Constants::dyaw[act]); i++) {
        State s = result.back().first;
        xSucc = s.x + Constants::dx[act] * cos(-s.yaw) -
                Constants::dy[act] * sin(-s.yaw);
        ySucc = s.y + Constants::dx[act] * sin(-s.yaw) +
                Constants::dy[act] * cos(-s.yaw);
        yawSucc = Constants::normalizeHeadingRad(s.yaw + Constants::dyaw[act]);
        State nextState(xSucc, ySucc, yawSucc, result.back().first.time + 1);
        if (!stateValid(nextState)) return false;
        result.emplace_back(std::make_pair<>(
            nextState, Constants::dx[0] * Constants::penaltyTurning));
      }
      // residual fraction of one turning primitive
      ratio = (deltaSteer - (size_t)(deltaSteer / Constants::dyaw[act]) *
                                Constants::dyaw[act]) /
              Constants::dyaw[act];
      dyaw = ratio * Constants::dyaw[act];
      dx = Constants::r * sin(dyaw);
      dy = -Constants::r * (1 - cos(dyaw));
      if (act == 2 || act == 5) {
        // ... (sign adjustments for the mirrored turning primitives)
      }
    }
    // final partial step covering the residual displacement
    State s = result.back().first;
    xSucc = s.x + dx * cos(-s.yaw) - dy * sin(-s.yaw);
    ySucc = s.y + dx * sin(-s.yaw) + dy * cos(-s.yaw);
    yawSucc = Constants::normalizeHeadingRad(s.yaw + dyaw);
    State nextState(xSucc, ySucc, yawSucc, result.back().first.time + 1);
    if (!stateValid(nextState)) return false;
    result.emplace_back(std::make_pair<>(nextState, ratio * Constants::dx[0]));
    return true;
  }
  int m_dimx;
  int m_dimy;
  std::vector<std::vector<std::vector<double>>> holonomic_cost_maps;
  std::unordered_set<Location> m_obstacles;
  std::multimap<int, State> m_dynamic_obstacles;
  std::vector<State> m_goals;
  std::vector<double> m_vel_limit;
  size_t m_agentIdx;
  const Constraints *m_constraints;
  int m_lastGoalConstraint;
  int m_highLevelExpanded;
  int m_lowLevelExpanded;
};
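
// Illustrative sketch (not part of environment.hpp): the calling pattern a
// low-level single-agent search is expected to follow against this class. The
// surrounding search loop, open list and PlanResult bookkeeping live in the
// planner itself and are only hinted at; Neighbor is assumed to expose the
// state/action/cost fields suggested by its use in getNeighbors().
//
//   env.setLowLevelContext(agentIdx, &constraints);   // pick agent + constraints
//   double h = env.admissibleHeuristic(current);      // f = g + h
//   std::vector<Neighbor<State, Action, double>> neighbors;
//   env.getNeighbors(current, lastAction, neighbors); // motion-primitive successors
//   for (const auto &nbr : neighbors) {
//     uint64_t key = env.calcIndex(nbr.state);        // duplicate-detection bucket
//     // ... push (nbr.state, g + nbr.cost) onto the open list under `key`
//   }
//   std::unordered_map<State, std::tuple<State, Action, double, double>,
//                      std::hash<State>>
//       cameFrom;
//   if (env.isSolution(current, g, cameFrom)) {
//     // ... an analytic Reeds-Shepp connection to the goal succeeded
//   }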