axelrodR 2014-06-19 17:03:40 +03:00
commit e834dcda1e
12 changed files with 1114 additions and 185 deletions

View File

@ -33,10 +33,6 @@ void duDebugDrawHeightfieldLayer(duDebugDraw* dd, const struct rcHeightfieldLaye
void duDebugDrawHeightfieldLayers(duDebugDraw* dd, const struct rcHeightfieldLayerSet& lset);
void duDebugDrawHeightfieldLayersRegions(duDebugDraw* dd, const struct rcHeightfieldLayerSet& lset);
void duDebugDrawLayerContours(duDebugDraw* dd, const struct rcLayerContourSet& lcset);
void duDebugDrawLayerPolyMesh(duDebugDraw* dd, const struct rcLayerPolyMesh& lmesh);
void duDebugDrawRegionConnections(struct duDebugDraw* dd, const struct rcContourSet& cset, const float alpha = 1.0f);
void duDebugDrawRawContours(struct duDebugDraw* dd, const struct rcContourSet& cset, const float alpha = 1.0f);
void duDebugDrawContours(struct duDebugDraw* dd, const struct rcContourSet& cset, const float alpha = 1.0f);

View File

@ -41,6 +41,10 @@ class dtQueryFilter
public:
dtQueryFilter();
#ifdef DT_VIRTUAL_QUERYFILTER
virtual ~dtQueryFilter() { }
#endif
/// Returns true if the polygon can be visited. (I.e. Is traversable.)
/// @param[in] ref The reference id of the polygon test.
/// @param[in] tile The tile containing the polygon.
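The virtual destructor added above matters when dtQueryFilter is subclassed; with DT_VIRTUAL_QUERYFILTER defined, passFilter()/getCost() are declared virtual as well. Below is a minimal sketch of a custom filter, not part of this commit; POLYFLAGS_DISABLED is a hypothetical application-defined flag.
#include "DetourNavMesh.h"
#include "DetourNavMeshQuery.h"
// Hypothetical application-defined polygon flag, used only for this sketch.
static const unsigned short POLYFLAGS_DISABLED = 0x8000;
// Sketch only: a filter that rejects temporarily disabled polygons. Assumes
// DT_VIRTUAL_QUERYFILTER is defined so the base destructor and passFilter()
// are virtual.
class DisabledAreaFilter : public dtQueryFilter
{
public:
	virtual ~DisabledAreaFilter() { }
	virtual bool passFilter(const dtPolyRef ref, const dtMeshTile* tile, const dtPoly* poly) const
	{
		if (poly->flags & POLYFLAGS_DISABLED)
			return false;
		// Fall back to the default include/exclude flag test.
		return dtQueryFilter::passFilter(ref, tile, poly);
	}
};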

View File

@ -338,7 +338,7 @@ struct rcHeightfieldLayer
int maxy; ///< The maximum y-bounds of usable data. (Along the z-axis.)
int hmin; ///< The minimum height bounds of usable data. (Along the y-axis.)
int hmax; ///< The maximum height bounds of usable data. (Along the y-axis.)
unsigned char* heights; ///< The heightfield. [Size: (width - borderSize*2) * (h - borderSize*2)]
unsigned char* heights; ///< The heightfield. [Size: width * height]
unsigned char* areas; ///< Area ids. [Size: Same as #heights]
unsigned char* cons; ///< Packed neighbor connection information. [Size: Same as #heights]
};
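With this change the heights/areas/cons arrays cover the full layer grid rather than the border-trimmed size, so cells are addressed as x + y*width and minx/maxx/miny/maxy only bound the usable data. A hedged access sketch follows; the row-major indexing and the 0xff "empty cell" sentinel are assumptions about how rcBuildHeightfieldLayers fills the arrays, not something stated in this hunk.
#include "Recast.h"
// Sketch: visit the usable cells of one layer under the assumptions above.
static void visitLayerCells(const rcHeightfieldLayer& layer)
{
	for (int y = layer.miny; y <= layer.maxy; ++y)
	{
		for (int x = layer.minx; x <= layer.maxx; ++x)
		{
			const int idx = x + y*layer.width;
			if (layer.heights[idx] == 0xff)
				continue;	// assumed sentinel for an empty cell
			const unsigned char area = layer.areas[idx];	// area id
			const unsigned char con = layer.cons[idx];	// packed neighbour connections
			// ... consume height/area/connection data here ...
			(void)area; (void)con;
		}
	}
}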
@ -969,7 +969,7 @@ void rcMarkCylinderArea(rcContext* ctx, const float* pos,
/// @returns True if the operation completed successfully.
bool rcBuildDistanceField(rcContext* ctx, rcCompactHeightfield& chf);
/// Builds region data for the heightfield using watershed partitioning.
/// Builds region data for the heightfield using watershed partitioning.
/// @ingroup recast
/// @param[in,out] ctx The build context to use during the operation.
/// @param[in,out] chf A populated compact heightfield.
@ -983,6 +983,18 @@ bool rcBuildDistanceField(rcContext* ctx, rcCompactHeightfield& chf);
bool rcBuildRegions(rcContext* ctx, rcCompactHeightfield& chf,
const int borderSize, const int minRegionArea, const int mergeRegionArea);
/// Builds region data for the heightfield by partitioning the heightfield in non-overlapping layers.
/// @ingroup recast
/// @param[in,out] ctx The build context to use during the operation.
/// @param[in,out] chf A populated compact heightfield.
/// @param[in] borderSize The size of the non-navigable border around the heightfield.
/// [Limit: >=0] [Units: vx]
/// @param[in] minRegionArea The minimum number of cells allowed to form isolated island areas.
/// [Limit: >=0] [Units: vx].
/// @returns True if the operation completed successfully.
bool rcBuildLayerRegions(rcContext* ctx, rcCompactHeightfield& chf,
const int borderSize, const int minRegionArea);
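A minimal usage sketch for the new partitioner, assuming ctx, chf and cfg are an initialized rcContext, a populated and eroded rcCompactHeightfield, and an rcConfig; unlike the watershed path, no rcBuildDistanceField call is required first.
// Sketch only: selecting layer partitioning in a build pipeline.
if (!rcBuildLayerRegions(ctx, *chf, cfg.borderSize, cfg.minRegionArea))
{
	ctx->log(RC_LOG_ERROR, "build: Could not build layer regions.");
	return false;
}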
/// Builds region data for the heightfield using simple monotone partitioning.
/// @ingroup recast
/// @param[in,out] ctx The build context to use during the operation.
@ -997,7 +1009,6 @@ bool rcBuildRegions(rcContext* ctx, rcCompactHeightfield& chf,
bool rcBuildRegionsMonotone(rcContext* ctx, rcCompactHeightfield& chf,
const int borderSize, const int minRegionArea, const int mergeRegionArea);
/// Sets the neighbor connection data for the specified direction.
/// @param[in] s The span to update.
/// @param[in] dir The direction to set. [Limits: 0 <= value < 4]

View File

@ -20,6 +20,7 @@
#include <math.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include "Recast.h"
#include "RecastAlloc.h"
#include "RecastAssert.h"
@ -311,13 +312,13 @@ static void simplifyContour(rcIntArray& points, rcIntArray& simplified,
{
int ii = (i+1) % (simplified.size()/4);
const int ax = simplified[i*4+0];
const int az = simplified[i*4+2];
const int ai = simplified[i*4+3];
int ax = simplified[i*4+0];
int az = simplified[i*4+2];
int ai = simplified[i*4+3];
const int bx = simplified[ii*4+0];
const int bz = simplified[ii*4+2];
const int bi = simplified[ii*4+3];
int bx = simplified[ii*4+0];
int bz = simplified[ii*4+2];
int bi = simplified[ii*4+3];
// Find maximum deviation from the segment.
float maxd = 0;
@ -338,6 +339,8 @@ static void simplifyContour(rcIntArray& points, rcIntArray& simplified,
cinc = pn-1;
ci = (bi+cinc) % pn;
endi = ai;
rcSwap(ax, bx);
rcSwap(az, bz);
}
// Tessellate only outer edges or edges between areas.
@ -469,32 +472,6 @@ static void simplifyContour(rcIntArray& points, rcIntArray& simplified,
}
static void removeDegenerateSegments(rcIntArray& simplified)
{
// Remove adjacent vertices which are equal on xz-plane,
// or else the triangulator will get confused.
for (int i = 0; i < simplified.size()/4; ++i)
{
int ni = i+1;
if (ni >= (simplified.size()/4))
ni = 0;
if (simplified[i*4+0] == simplified[ni*4+0] &&
simplified[i*4+2] == simplified[ni*4+2])
{
// Degenerate segment, remove.
for (int j = i; j < simplified.size()/4-1; ++j)
{
simplified[j*4+0] = simplified[(j+1)*4+0];
simplified[j*4+1] = simplified[(j+1)*4+1];
simplified[j*4+2] = simplified[(j+1)*4+2];
simplified[j*4+3] = simplified[(j+1)*4+3];
}
simplified.resize(simplified.size()-4);
}
}
}
static int calcAreaOfPolygon2D(const int* verts, const int nverts)
{
int area = 0;
@ -507,45 +484,146 @@ static int calcAreaOfPolygon2D(const int* verts, const int nverts)
return (area+1) / 2;
}
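Only the accumulator setup and the final division survive in this hunk; the elided loop sums the cross products of consecutive vertices in the xz-plane, so the sign of the return value encodes the contour's winding. That sign is what the hole handling further down relies on (calcAreaOfPolygon2D(...) < 0 marks a hole). A small self-check, assuming the 4-int vertex stride used throughout this file:
// Sketch: the two windings of the same square give areas of opposite sign,
// so exactly one of them is classified as a hole.
static void windingSanityCheck()
{
	const int fwd[4*4] = { 0,0,0,0,  10,0,0,0,  10,0,10,0,  0,0,10,0 };
	int rev[4*4];
	for (int i = 0; i < 4; ++i)
		memcpy(&rev[i*4], &fwd[(3-i)*4], sizeof(int)*4);
	const bool fwdIsHole = calcAreaOfPolygon2D(fwd, 4) < 0;
	const bool revIsHole = calcAreaOfPolygon2D(rev, 4) < 0;
	rcAssert(fwdIsHole != revIsHole);
}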
inline bool ileft(const int* a, const int* b, const int* c)
// TODO: these are the same as in RecastMesh.cpp, consider using the same.
inline int prev(int i, int n) { return i-1 >= 0 ? i-1 : n-1; }
inline int next(int i, int n) { return i+1 < n ? i+1 : 0; }
inline int area2(const int* a, const int* b, const int* c)
{
return (b[0] - a[0]) * (c[2] - a[2]) - (c[0] - a[0]) * (b[2] - a[2]) <= 0;
return (b[0] - a[0]) * (c[2] - a[2]) - (c[0] - a[0]) * (b[2] - a[2]);
}
static void getClosestIndices(const int* vertsa, const int nvertsa,
const int* vertsb, const int nvertsb,
int& ia, int& ib)
// Exclusive or: true iff exactly one argument is true.
// The arguments are negated to ensure that they are 0/1
// values. Then the bitwise Xor operator may apply.
// (This idea is due to Michael Baldwin.)
inline bool xorb(bool x, bool y)
{
int closestDist = 0xfffffff;
ia = -1, ib = -1;
for (int i = 0; i < nvertsa; ++i)
return !x ^ !y;
}
// Returns true iff c is strictly to the left of the directed
// line through a to b.
inline bool left(const int* a, const int* b, const int* c)
{
return area2(a, b, c) < 0;
}
inline bool leftOn(const int* a, const int* b, const int* c)
{
return area2(a, b, c) <= 0;
}
inline bool collinear(const int* a, const int* b, const int* c)
{
return area2(a, b, c) == 0;
}
// Returns true iff ab properly intersects cd: they share
// a point interior to both segments. The properness of the
// intersection is ensured by using strict leftness.
static bool intersectProp(const int* a, const int* b, const int* c, const int* d)
{
// Eliminate improper cases.
if (collinear(a,b,c) || collinear(a,b,d) ||
collinear(c,d,a) || collinear(c,d,b))
return false;
return xorb(left(a,b,c), left(a,b,d)) && xorb(left(c,d,a), left(c,d,b));
}
// Returns true iff (a,b,c) are collinear and point c lies
// on the closed segment ab.
static bool between(const int* a, const int* b, const int* c)
{
if (!collinear(a, b, c))
return false;
// If ab not vertical, check betweenness on x; else on y.
if (a[0] != b[0])
return ((a[0] <= c[0]) && (c[0] <= b[0])) || ((a[0] >= c[0]) && (c[0] >= b[0]));
else
return ((a[2] <= c[2]) && (c[2] <= b[2])) || ((a[2] >= c[2]) && (c[2] >= b[2]));
}
// Returns true iff segments ab and cd intersect, properly or improperly.
static bool intersect(const int* a, const int* b, const int* c, const int* d)
{
if (intersectProp(a, b, c, d))
return true;
else if (between(a, b, c) || between(a, b, d) ||
between(c, d, a) || between(c, d, b))
return true;
else
return false;
}
static bool vequal(const int* a, const int* b)
{
return a[0] == b[0] && a[2] == b[2];
}
static bool intersectSegCountour(const int* d0, const int* d1, int i, int n, const int* verts)
{
// For each edge (k,k+1) of P
for (int k = 0; k < n; k++)
{
const int in = (i+1) % nvertsa;
const int ip = (i+nvertsa-1) % nvertsa;
const int* va = &vertsa[i*4];
const int* van = &vertsa[in*4];
const int* vap = &vertsa[ip*4];
int k1 = next(k, n);
// Skip edges incident to i.
if (i == k || i == k1)
continue;
const int* p0 = &verts[k * 4];
const int* p1 = &verts[k1 * 4];
if (vequal(d0, p0) || vequal(d1, p0) || vequal(d0, p1) || vequal(d1, p1))
continue;
if (intersect(d0, d1, p0, p1))
return true;
}
return false;
}
static bool inCone(int i, int n, const int* verts, const int* pj)
{
const int* pi = &verts[i * 4];
const int* pi1 = &verts[next(i, n) * 4];
const int* pin1 = &verts[prev(i, n) * 4];
// If P[i] is a convex vertex [ i+1 left or on (i-1,i) ].
if (leftOn(pin1, pi, pi1))
return left(pi, pj, pin1) && left(pj, pi, pi1);
// Assume (i-1,i,i+1) not collinear.
// else P[i] is reflex.
return !(leftOn(pi, pj, pi1) && leftOn(pj, pi, pin1));
}
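These helpers are the standard computational-geometry predicates: area2 is twice the signed area of triangle (a,b,c) projected onto the xz-plane, left/leftOn/collinear test its sign, and between/intersect/inCone are built from them. A small self-check with concrete points, again assuming the 4-int vertex stride (only indices 0 and 2 are read):
// Sketch: exercising the predicates on concrete xz points.
static void predicateSanityCheck()
{
	const int a[4] = { 0, 0, 0, 0 };
	const int b[4] = { 10, 0, 0, 0 };
	const int c[4] = { 5, 0, 5, 0 };	// off the ab line
	const int d[4] = { 5, 0, -5, 0 };	// on the other side of ab
	const int m[4] = { 5, 0, 0, 0 };	// midpoint of ab
	rcAssert(collinear(a, b, m));
	rcAssert(between(a, b, m));
	rcAssert(left(a, b, c) != left(a, b, d));	// c and d lie on opposite sides of ab
	rcAssert(intersectProp(c, d, a, b));		// cd crosses the interior of ab
	rcAssert(!intersectProp(a, b, a, m));		// collinear overlap is not a proper intersection
}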
static void removeDegenerateSegments(rcIntArray& simplified)
{
// Remove adjacent vertices which are equal on xz-plane,
// or else the triangulator will get confused.
int npts = simplified.size()/4;
for (int i = 0; i < npts; ++i)
{
int ni = next(i, npts);
for (int j = 0; j < nvertsb; ++j)
if (vequal(&simplified[i*4], &simplified[ni*4]))
{
const int* vb = &vertsb[j*4];
// vb must be "infront" of va.
if (ileft(vap,va,vb) && ileft(va,van,vb))
// Degenerate segment, remove.
for (int j = i; j < simplified.size()/4-1; ++j)
{
const int dx = vb[0] - va[0];
const int dz = vb[2] - va[2];
const int d = dx*dx + dz*dz;
if (d < closestDist)
{
ia = i;
ib = j;
closestDist = d;
}
simplified[j*4+0] = simplified[(j+1)*4+0];
simplified[j*4+1] = simplified[(j+1)*4+1];
simplified[j*4+2] = simplified[(j+1)*4+2];
simplified[j*4+3] = simplified[(j+1)*4+3];
}
simplified.resize(simplified.size()-4);
npts--;
}
}
}
static bool mergeContours(rcContour& ca, rcContour& cb, int ia, int ib)
{
const int maxVerts = ca.nverts + cb.nverts + 2;
@ -590,6 +668,167 @@ static bool mergeContours(rcContour& ca, rcContour& cb, int ia, int ib)
return true;
}
struct rcContourHole
{
rcContour* contour;
int minx, minz, leftmost;
};
struct rcContourRegion
{
rcContour* outline;
rcContourHole* holes;
int nholes;
};
struct rcPotentialDiagonal
{
int vert;
int dist;
};
// Finds the lowest leftmost vertex of a contour.
static void findLeftMostVertex(rcContour* contour, int* minx, int* minz, int* leftmost)
{
*minx = contour->verts[0];
*minz = contour->verts[2];
*leftmost = 0;
for (int i = 1; i < contour->nverts; i++)
{
const int x = contour->verts[i*4+0];
const int z = contour->verts[i*4+2];
if (x < *minx || (x == *minx && z < *minz))
{
*minx = x;
*minz = z;
*leftmost = i;
}
}
}
static int compareHoles(const void* va, const void* vb)
{
const rcContourHole* a = (const rcContourHole*)va;
const rcContourHole* b = (const rcContourHole*)vb;
if (a->minx == b->minx)
{
if (a->minz < b->minz)
return -1;
if (a->minz > b->minz)
return 1;
}
else
{
if (a->minx < b->minx)
return -1;
if (a->minx > b->minx)
return 1;
}
return 0;
}
static int compareDiagDist(const void* va, const void* vb)
{
const rcPotentialDiagonal* a = (const rcPotentialDiagonal*)va;
const rcPotentialDiagonal* b = (const rcPotentialDiagonal*)vb;
if (a->dist < b->dist)
return -1;
if (a->dist > b->dist)
return 1;
return 0;
}
static void mergeRegionHoles(rcContext* ctx, rcContourRegion& region)
{
// Sort holes from left to right.
for (int i = 0; i < region.nholes; i++)
findLeftMostVertex(region.holes[i].contour, &region.holes[i].minx, &region.holes[i].minz, &region.holes[i].leftmost);
qsort(region.holes, region.nholes, sizeof(rcContourHole), compareHoles);
int maxVerts = region.outline->nverts;
for (int i = 0; i < region.nholes; i++)
maxVerts += region.holes[i].contour->nverts;
rcScopedDelete<rcPotentialDiagonal> diags = (rcPotentialDiagonal*)rcAlloc(sizeof(rcPotentialDiagonal)*maxVerts, RC_ALLOC_TEMP);
if (!diags)
{
ctx->log(RC_LOG_WARNING, "mergeRegionHoles: Failed to allocated diags %d.", maxVerts);
return;
}
rcContour* outline = region.outline;
// Merge holes into the outline one by one.
for (int i = 0; i < region.nholes; i++)
{
rcContour* hole = region.holes[i].contour;
int index = -1;
int bestVertex = region.holes[i].leftmost;
for (int iter = 0; iter < hole->nverts; iter++)
{
// Find potential diagonals.
// The 'best' vertex must be in the cone described by 3 consecutive vertices of the outline.
// ..o j-1
// |
// | * best
// |
// j o-----o j+1
// :
int ndiags = 0;
const int* corner = &hole->verts[bestVertex*4];
for (int j = 0; j < outline->nverts; j++)
{
if (inCone(j, outline->nverts, outline->verts, corner))
{
int dx = outline->verts[j*4+0] - corner[0];
int dz = outline->verts[j*4+2] - corner[2];
diags[ndiags].vert = j;
diags[ndiags].dist = dx*dx + dz*dz;
ndiags++;
}
}
// Sort potential diagonals by distance; we want to make the connection as short as possible.
qsort(diags, ndiags, sizeof(rcPotentialDiagonal), compareDiagDist);
// Find a diagonal that is not intersecting the outline nor the remaining holes.
index = -1;
for (int j = 0; j < ndiags; j++)
{
const int* pt = &outline->verts[diags[j].vert*4];
bool intersect = intersectSegCountour(pt, corner, diags[j].vert, outline->nverts, outline->verts);
for (int k = i; k < region.nholes && !intersect; k++)
intersect |= intersectSegCountour(pt, corner, -1, region.holes[k].contour->nverts, region.holes[k].contour->verts);
if (!intersect)
{
index = diags[j].vert;
break;
}
}
// If found non-intersecting diagonal, stop looking.
if (index != -1)
break;
// All the potential diagonals for the current vertex were intersecting, try next vertex.
bestVertex = (bestVertex + 1) % hole->nverts;
}
if (index == -1)
{
ctx->log(RC_LOG_WARNING, "mergeHoles: Failed to find merge points for %p and %p.", region.outline, hole);
continue;
}
if (!mergeContours(*region.outline, *hole, index, bestVertex))
{
ctx->log(RC_LOG_WARNING, "mergeHoles: Failed to merge contours %p and %p.", region.outline, hole);
continue;
}
}
}
/// @par
///
/// The raw contours will match the region outlines exactly. The @p maxError and @p maxEdgeLen
@ -722,7 +961,7 @@ bool rcBuildContours(rcContext* ctx, rcCompactHeightfield& chf,
if (cset.nconts >= maxContours)
{
// Allocate more contours.
// This can happen when there are tiny holes in the heightfield.
// This happens when a region has holes.
const int oldMax = maxContours;
maxContours *= 2;
rcContour* newConts = (rcContour*)rcAlloc(sizeof(rcContour)*maxContours, RC_ALLOC_PERM);
@ -738,7 +977,7 @@ bool rcBuildContours(rcContext* ctx, rcCompactHeightfield& chf,
ctx->log(RC_LOG_WARNING, "rcBuildContours: Expanding max contours from %d to %d.", oldMax, maxContours);
}
rcContour* cont = &cset.conts[cset.nconts++];
cont->nverts = simplified.size()/4;
@ -779,70 +1018,95 @@ bool rcBuildContours(rcContext* ctx, rcCompactHeightfield& chf,
}
}
/* cont->cx = cont->cy = cont->cz = 0;
for (int i = 0; i < cont->nverts; ++i)
{
cont->cx += cont->verts[i*4+0];
cont->cy += cont->verts[i*4+1];
cont->cz += cont->verts[i*4+2];
}
cont->cx /= cont->nverts;
cont->cy /= cont->nverts;
cont->cz /= cont->nverts;*/
cont->reg = reg;
cont->area = area;
}
}
}
}
// Check and merge droppings.
// Sometimes the previous algorithms can fail and create several contours
// per area. This pass will try to merge the holes into the main region.
for (int i = 0; i < cset.nconts; ++i)
// Merge holes if needed.
if (cset.nconts > 0)
{
rcContour& cont = cset.conts[i];
// Check if the contour is would backwards.
if (calcAreaOfPolygon2D(cont.verts, cont.nverts) < 0)
// Calculate winding of all polygons.
rcScopedDelete<char> winding = (char*)rcAlloc(sizeof(char)*cset.nconts, RC_ALLOC_TEMP);
if (!winding)
{
// Find another contour which has the same region ID.
int mergeIdx = -1;
for (int j = 0; j < cset.nconts; ++j)
ctx->log(RC_LOG_ERROR, "rcBuildContours: Out of memory 'hole' (%d).", cset.nconts);
return false;
}
int nholes = 0;
for (int i = 0; i < cset.nconts; ++i)
{
rcContour& cont = cset.conts[i];
// If the contour is wound backwards, it is a hole.
winding[i] = calcAreaOfPolygon2D(cont.verts, cont.nverts) < 0 ? -1 : 1;
if (winding[i] < 0)
nholes++;
}
if (nholes > 0)
{
// Collect the outline contour and hole contours for each region.
// We assume that there is one outline and multiple holes.
const int nregions = chf.maxRegions+1;
rcScopedDelete<rcContourRegion> regions = (rcContourRegion*)rcAlloc(sizeof(rcContourRegion)*nregions, RC_ALLOC_TEMP);
if (!regions)
{
if (i == j) continue;
if (cset.conts[j].nverts && cset.conts[j].reg == cont.reg)
ctx->log(RC_LOG_ERROR, "rcBuildContours: Out of memory 'regions' (%d).", nregions);
return false;
}
memset(regions, 0, sizeof(rcContourRegion)*nregions);
rcScopedDelete<rcContourHole> holes = (rcContourHole*)rcAlloc(sizeof(rcContourHole)*cset.nconts, RC_ALLOC_TEMP);
if (!holes)
{
ctx->log(RC_LOG_ERROR, "rcBuildContours: Out of memory 'holes' (%d).", cset.nconts);
return false;
}
memset(holes, 0, sizeof(rcContourHole)*cset.nconts);
for (int i = 0; i < cset.nconts; ++i)
{
rcContour& cont = cset.conts[i];
// Positively wound contours are outlines, negatively wound ones are holes.
if (winding[i] > 0)
{
// Make sure the polygon is correctly oriented.
if (calcAreaOfPolygon2D(cset.conts[j].verts, cset.conts[j].nverts))
{
mergeIdx = j;
break;
}
if (regions[cont.reg].outline)
ctx->log(RC_LOG_ERROR, "rcBuildContours: Multiple outlines for region %d.", cont.reg);
regions[cont.reg].outline = &cont;
}
else
{
regions[cont.reg].nholes++;
}
}
if (mergeIdx == -1)
int index = 0;
for (int i = 0; i < nregions; i++)
{
ctx->log(RC_LOG_WARNING, "rcBuildContours: Could not find merge target for bad contour %d.", i);
if (regions[i].nholes > 0)
{
regions[i].holes = &holes[index];
index += regions[i].nholes;
regions[i].nholes = 0;
}
}
else
for (int i = 0; i < cset.nconts; ++i)
{
rcContour& mcont = cset.conts[mergeIdx];
// Merge by closest points.
int ia = 0, ib = 0;
getClosestIndices(mcont.verts, mcont.nverts, cont.verts, cont.nverts, ia, ib);
if (ia == -1 || ib == -1)
{
ctx->log(RC_LOG_WARNING, "rcBuildContours: Failed to find merge points for %d and %d.", i, mergeIdx);
continue;
}
if (!mergeContours(mcont, cont, ia, ib))
{
ctx->log(RC_LOG_WARNING, "rcBuildContours: Failed to merge contours %d and %d.", i, mergeIdx);
continue;
}
rcContour& cont = cset.conts[i];
rcContourRegion& reg = regions[cont.reg];
if (winding[i] < 0)
reg.holes[reg.nholes++].contour = &cont;
}
// Finally merge each region's holes into the outline.
for (int i = 0; i < nregions; i++)
{
if (regions[i].nholes > 0)
mergeRegionHoles(ctx, regions[i]);
}
}
}
ctx->stopTimer(RC_TIMER_BUILD_CONTOURS);

View File

@ -38,7 +38,7 @@ struct rcLayerRegion
unsigned char layerId; // Layer ID
unsigned char nlayers; // Layer count
unsigned char nneis; // Neighbour count
unsigned char base; // Flag indicating if the region is hte base of merged regions.
unsigned char base; // Flag indicating if the region is the base of merged regions.
};

View File

@ -288,6 +288,53 @@ static bool diagonal(int i, int j, int n, const int* verts, int* indices)
return inCone(i, j, n, verts, indices) && diagonalie(i, j, n, verts, indices);
}
static bool diagonalieLoose(int i, int j, int n, const int* verts, int* indices)
{
const int* d0 = &verts[(indices[i] & 0x0fffffff) * 4];
const int* d1 = &verts[(indices[j] & 0x0fffffff) * 4];
// For each edge (k,k+1) of P
for (int k = 0; k < n; k++)
{
int k1 = next(k, n);
// Skip edges incident to i or j
if (!((k == i) || (k1 == i) || (k == j) || (k1 == j)))
{
const int* p0 = &verts[(indices[k] & 0x0fffffff) * 4];
const int* p1 = &verts[(indices[k1] & 0x0fffffff) * 4];
if (vequal(d0, p0) || vequal(d1, p0) || vequal(d0, p1) || vequal(d1, p1))
continue;
if (intersectProp(d0, d1, p0, p1))
return false;
}
}
return true;
}
static bool inConeLoose(int i, int j, int n, const int* verts, int* indices)
{
const int* pi = &verts[(indices[i] & 0x0fffffff) * 4];
const int* pj = &verts[(indices[j] & 0x0fffffff) * 4];
const int* pi1 = &verts[(indices[next(i, n)] & 0x0fffffff) * 4];
const int* pin1 = &verts[(indices[prev(i, n)] & 0x0fffffff) * 4];
// If P[i] is a convex vertex [ i+1 left or on (i-1,i) ].
if (leftOn(pin1, pi, pi1))
return leftOn(pi, pj, pin1) && leftOn(pj, pi, pi1);
// Assume (i-1,i,i+1) not collinear.
// else P[i] is reflex.
return !(leftOn(pi, pj, pi1) && leftOn(pj, pi, pin1));
}
static bool diagonalLoose(int i, int j, int n, const int* verts, int* indices)
{
return inConeLoose(i, j, n, verts, indices) && diagonalieLoose(i, j, n, verts, indices);
}
static int triangulate(int n, const int* verts, int* indices, int* tris)
{
int ntris = 0;
@ -328,14 +375,41 @@ static int triangulate(int n, const int* verts, int* indices, int* tris)
if (mini == -1)
{
// Should not happen.
/* printf("mini == -1 ntris=%d n=%d\n", ntris, n);
// We might get here because the contour has overlapping segments, like this:
//
// A o-o=====o---o B
// / |C D| \
// o o o o
// : : : :
// We'll try to recover by loosening up the inCone test a bit so that a diagonal
// like A-B or C-D can be found and we can continue.
minLen = -1;
mini = -1;
for (int i = 0; i < n; i++)
{
printf("%d ", indices[i] & 0x0fffffff);
int i1 = next(i, n);
int i2 = next(i1, n);
if (diagonalLoose(i, i2, n, verts, indices))
{
const int* p0 = &verts[(indices[i] & 0x0fffffff) * 4];
const int* p2 = &verts[(indices[next(i2, n)] & 0x0fffffff) * 4];
int dx = p2[0] - p0[0];
int dy = p2[2] - p0[2];
int len = dx*dx + dy*dy;
if (minLen < 0 || len < minLen)
{
minLen = len;
mini = i;
}
}
}
if (mini == -1)
{
// The contour is messed up. This sometimes happens
// if the contour simplification is too aggressive.
return -ntris;
}
printf("\n");*/
return -ntris;
}
int i = mini;

View File

@ -489,6 +489,99 @@ static void delaunayHull(rcContext* ctx, const int npts, const float* pts,
}
}
// Calculate minimum extent of the polygon.
static float polyMinExtent(const float* verts, const int nverts)
{
float minDist = FLT_MAX;
for (int i = 0; i < nverts; i++)
{
const int ni = (i+1) % nverts;
const float* p1 = &verts[i*3];
const float* p2 = &verts[ni*3];
float maxEdgeDist = 0;
for (int j = 0; j < nverts; j++)
{
if (j == i || j == ni) continue;
float d = distancePtSeg2d(&verts[j*3], p1,p2);
maxEdgeDist = rcMax(maxEdgeDist, d);
}
minDist = rcMin(minDist, maxEdgeDist);
}
return rcSqrt(minDist);
}
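For each edge, polyMinExtent takes the farthest distance of the remaining vertices from that edge, then returns the smallest of those maxima (distancePtSeg2d returns a squared distance, hence the final rcSqrt). For a long thin polygon this is its narrow width, which is what the minExtent < sampleDist*2 sliver test further down keys off. A hedged check on a 10 x 0.5 rectangle:
// Sketch: the two long edges see a farthest-vertex distance of 0.5 and the two
// short edges one of 10, so the reported extent is the narrow width, 0.5.
static void minExtentSanityCheck()
{
	const float rect[4*3] = {
		0.0f, 0.0f, 0.0f,
		10.0f, 0.0f, 0.0f,
		10.0f, 0.0f, 0.5f,
		0.0f, 0.0f, 0.5f,
	};
	const float ext = polyMinExtent(rect, 4);
	// With sampleDist = 1.0 this polygon counts as a sliver (ext < sampleDist*2),
	// so buildPolyDetail would skip adding interior sample points.
	rcAssert(ext > 0.49f && ext < 0.51f);
}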
inline int next(int i, int n)
{
return (i+1) % n;
}
inline int prev(int i, int n)
{
return (i + n-1) % n;
}
static void triangulateHull(const int nverts, const float* verts, const int nhull, const int* hull, rcIntArray& tris)
{
int start = 0, left = 1, right = nhull-1;
// Start from shortest ear.
float dmin = FLT_MAX;
for (int i = 0; i < nhull; i++)
{
int pi = prev(i, nhull);
int ni = next(i, nhull);
const float* pv = &verts[hull[pi]*3];
const float* nv = &verts[hull[ni]*3];
const float d = vdistSq2(pv, nv);
if (d < dmin)
{
start = i;
left = ni;
right = pi;
dmin = d;
}
}
// Add first triangle
tris.push(hull[start]);
tris.push(hull[left]);
tris.push(hull[right]);
tris.push(0);
// Triangulate the polygon by adding the shortest diagonal
// by moving left or right.
while (next(left, nhull) != right)
{
// Check to see if we should advance left or right.
int nleft = next(left, nhull);
int nright = prev(right, nhull);
const float* cvleft = &verts[hull[left]*3];
const float* nvleft = &verts[hull[nleft]*3];
const float* cvright = &verts[hull[right]*3];
const float* nvright = &verts[hull[nright]*3];
const float dleft = vdistSq2(nvleft, cvright);
const float dright = vdistSq2(cvleft, nvright);
if (dleft < dright)
{
tris.push(hull[left]);
tris.push(hull[nleft]);
tris.push(hull[right]);
tris.push(0);
left = nleft;
}
else
{
tris.push(hull[left]);
tris.push(hull[nright]);
tris.push(hull[right]);
tris.push(0);
right = nright;
}
}
}
inline float getJitterX(const int i)
{
@ -518,9 +611,15 @@ static bool buildPolyDetail(rcContext* ctx, const float* in, const int nin,
for (int i = 0; i < nin; ++i)
rcVcopy(&verts[i*3], &in[i*3]);
nverts = nin;
edges.resize(0);
tris.resize(0);
const float cs = chf.cs;
const float ics = 1.0f/cs;
// Calculate minimum extents of the polygon based on input data.
float minExtent = polyMinExtent(verts, nverts);
// Tessellate outlines.
// This is done in separate pass in order to ensure
@ -627,25 +726,24 @@ static bool buildPolyDetail(rcContext* ctx, const float* in, const int nin,
}
}
}
// If the polygon minimum extent is small (sliver or small triangle), do not try to add internal points.
if (minExtent < sampleDist*2)
{
triangulateHull(nverts, verts, nhull, hull, tris);
return true;
}
// Tessellate the base mesh.
edges.resize(0);
tris.resize(0);
delaunayHull(ctx, nverts, verts, nhull, hull, tris, edges);
// We're using triangulateHull instead of delaunayHull as it tends to
// create a bit better triangulation for long thin triangles when there
// are no internal points.
triangulateHull(nverts, verts, nhull, hull, tris);
if (tris.size() == 0)
{
// Could not triangulate the poly, make sure there is some valid data there.
ctx->log(RC_LOG_WARNING, "buildPolyDetail: Could not triangulate polygon, adding default data.");
for (int i = 2; i < nverts; ++i)
{
tris.push(0);
tris.push(i-1);
tris.push(i);
tris.push(0);
}
ctx->log(RC_LOG_WARNING, "buildPolyDetail: Could not triangulate polygon (%d verts).", nverts);
return true;
}

View File

@ -315,6 +315,7 @@ static bool floodRegion(int x, int y, int i,
srcReg[ci] = 0;
continue;
}
count++;
// Expand neighbours.
@ -516,7 +517,11 @@ struct rcRegion
id(i),
areaType(0),
remap(false),
visited(false)
visited(false),
overlap(false),
connectsToBorder(false),
ymin(0xffff),
ymax(0)
{}
int spanCount; // Number of spans belonging to this region
@ -524,6 +529,9 @@ struct rcRegion
unsigned char areaType; // Area type.
bool remap;
bool visited;
bool overlap;
bool connectsToBorder;
unsigned short ymin, ymax;
rcIntArray connections;
rcIntArray floors;
};
@ -768,10 +776,11 @@ static void walkContour(int x, int y, int i, int dir,
}
}
static bool filterSmallRegions(rcContext* ctx, int minRegionArea, int mergeRegionSize,
unsigned short& maxRegionId,
rcCompactHeightfield& chf,
unsigned short* srcReg)
static bool mergeAndFilterRegions(rcContext* ctx, int minRegionArea, int mergeRegionSize,
unsigned short& maxRegionId,
rcCompactHeightfield& chf,
unsigned short* srcReg, rcIntArray& overlaps)
{
const int w = chf.width;
const int h = chf.height;
@ -780,7 +789,7 @@ static bool filterSmallRegions(rcContext* ctx, int minRegionArea, int mergeRegio
rcRegion* regions = (rcRegion*)rcAlloc(sizeof(rcRegion)*nreg, RC_ALLOC_TEMP);
if (!regions)
{
ctx->log(RC_LOG_ERROR, "filterSmallRegions: Out of memory 'regions' (%d).", nreg);
ctx->log(RC_LOG_ERROR, "mergeAndFilterRegions: Out of memory 'regions' (%d).", nreg);
return false;
}
@ -803,7 +812,6 @@ static bool filterSmallRegions(rcContext* ctx, int minRegionArea, int mergeRegio
rcRegion& reg = regions[r];
reg.spanCount++;
// Update floors.
for (int j = (int)c.index; j < ni; ++j)
{
@ -811,6 +819,8 @@ static bool filterSmallRegions(rcContext* ctx, int minRegionArea, int mergeRegio
unsigned short floorId = srcReg[j];
if (floorId == 0 || floorId >= nreg)
continue;
if (floorId == r)
reg.overlap = true;
addUniqueFloorRegion(reg, floorId);
}
@ -906,7 +916,7 @@ static bool filterSmallRegions(rcContext* ctx, int minRegionArea, int mergeRegio
}
}
}
// Merge too small regions to neighbour regions.
int mergeCount = 0 ;
do
@ -916,7 +926,9 @@ static bool filterSmallRegions(rcContext* ctx, int minRegionArea, int mergeRegio
{
rcRegion& reg = regions[i];
if (reg.id == 0 || (reg.id & RC_BORDER_REG))
continue;
continue;
if (reg.overlap)
continue;
if (reg.spanCount == 0)
continue;
@ -933,7 +945,7 @@ static bool filterSmallRegions(rcContext* ctx, int minRegionArea, int mergeRegio
{
if (reg.connections[j] & RC_BORDER_REG) continue;
rcRegion& mreg = regions[reg.connections[j]];
if (mreg.id == 0 || (mreg.id & RC_BORDER_REG)) continue;
if (mreg.id == 0 || (mreg.id & RC_BORDER_REG) || mreg.overlap) continue;
if (mreg.spanCount < smallest &&
canMergeWithRegion(reg, mreg) &&
canMergeWithRegion(mreg, reg))
@ -997,6 +1009,224 @@ static bool filterSmallRegions(rcContext* ctx, int minRegionArea, int mergeRegio
}
maxRegionId = regIdGen;
// Remap regions.
for (int i = 0; i < chf.spanCount; ++i)
{
if ((srcReg[i] & RC_BORDER_REG) == 0)
srcReg[i] = regions[srcReg[i]].id;
}
// Return regions that we found to be overlapping.
for (int i = 0; i < nreg; ++i)
if (regions[i].overlap)
overlaps.push(regions[i].id);
for (int i = 0; i < nreg; ++i)
regions[i].~rcRegion();
rcFree(regions);
return true;
}
static void addUniqueConnection(rcRegion& reg, int n)
{
for (int i = 0; i < reg.connections.size(); ++i)
if (reg.connections[i] == n)
return;
reg.connections.push(n);
}
static bool mergeAndFilterLayerRegions(rcContext* ctx, int minRegionArea,
unsigned short& maxRegionId,
rcCompactHeightfield& chf,
unsigned short* srcReg, rcIntArray& overlaps)
{
const int w = chf.width;
const int h = chf.height;
const int nreg = maxRegionId+1;
rcRegion* regions = (rcRegion*)rcAlloc(sizeof(rcRegion)*nreg, RC_ALLOC_TEMP);
if (!regions)
{
ctx->log(RC_LOG_ERROR, "mergeAndFilterLayerRegions: Out of memory 'regions' (%d).", nreg);
return false;
}
// Construct regions
for (int i = 0; i < nreg; ++i)
new(&regions[i]) rcRegion((unsigned short)i);
// Find region neighbours and overlapping regions.
rcIntArray lregs(32);
for (int y = 0; y < h; ++y)
{
for (int x = 0; x < w; ++x)
{
const rcCompactCell& c = chf.cells[x+y*w];
lregs.resize(0);
for (int i = (int)c.index, ni = (int)(c.index+c.count); i < ni; ++i)
{
const rcCompactSpan& s = chf.spans[i];
const unsigned short ri = srcReg[i];
if (ri == 0 || ri >= nreg) continue;
rcRegion& reg = regions[ri];
reg.spanCount++;
reg.ymin = rcMin(reg.ymin, s.y);
reg.ymax = rcMax(reg.ymax, s.y);
// Collect all region layers.
lregs.push(ri);
// Update neighbours
for (int dir = 0; dir < 4; ++dir)
{
if (rcGetCon(s, dir) != RC_NOT_CONNECTED)
{
const int ax = x + rcGetDirOffsetX(dir);
const int ay = y + rcGetDirOffsetY(dir);
const int ai = (int)chf.cells[ax+ay*w].index + rcGetCon(s, dir);
const unsigned short rai = srcReg[ai];
if (rai > 0 && rai < nreg && rai != ri)
addUniqueConnection(reg, rai);
if (rai & RC_BORDER_REG)
reg.connectsToBorder = true;
}
}
}
// Update overlapping regions.
for (int i = 0; i < lregs.size()-1; ++i)
{
for (int j = i+1; j < lregs.size(); ++j)
{
if (lregs[i] != lregs[j])
{
rcRegion& ri = regions[lregs[i]];
rcRegion& rj = regions[lregs[j]];
addUniqueFloorRegion(ri, lregs[j]);
addUniqueFloorRegion(rj, lregs[i]);
}
}
}
}
}
// Create 2D layers from regions.
unsigned short layerId = 1;
for (int i = 0; i < nreg; ++i)
regions[i].id = 0;
// Merge monotone regions to create non-overlapping areas.
rcIntArray stack(32);
for (int i = 1; i < nreg; ++i)
{
rcRegion& root = regions[i];
// Skip already visited.
if (root.id != 0)
continue;
// Start search.
root.id = layerId;
stack.resize(0);
stack.push(i);
while (stack.size() > 0)
{
// Pop front
rcRegion& reg = regions[stack[0]];
for (int j = 0; j < stack.size()-1; ++j)
stack[j] = stack[j+1];
stack.resize(stack.size()-1);
const int ncons = (int)reg.connections.size();
for (int j = 0; j < ncons; ++j)
{
const int nei = reg.connections[j];
rcRegion& regn = regions[nei];
// Skip already visited.
if (regn.id != 0)
continue;
// Skip if the neighbour is overlapping root region.
bool overlap = false;
for (int k = 0; k < root.floors.size(); k++)
{
if (root.floors[k] == nei)
{
overlap = true;
break;
}
}
if (overlap)
continue;
// Deepen
stack.push(nei);
// Mark layer id
regn.id = layerId;
// Merge current layers to root.
for (int k = 0; k < regn.floors.size(); ++k)
addUniqueFloorRegion(root, regn.floors[k]);
root.ymin = rcMin(root.ymin, regn.ymin);
root.ymax = rcMax(root.ymax, regn.ymax);
root.spanCount += regn.spanCount;
regn.spanCount = 0;
root.connectsToBorder = root.connectsToBorder || regn.connectsToBorder;
}
}
layerId++;
}
// Remove small regions
for (int i = 0; i < nreg; ++i)
{
if (regions[i].spanCount > 0 && regions[i].spanCount < minRegionArea && !regions[i].connectsToBorder)
{
unsigned short reg = regions[i].id;
for (int j = 0; j < nreg; ++j)
if (regions[j].id == reg)
regions[j].id = 0;
}
}
// Compress region Ids.
for (int i = 0; i < nreg; ++i)
{
regions[i].remap = false;
if (regions[i].id == 0) continue; // Skip nil regions.
if (regions[i].id & RC_BORDER_REG) continue; // Skip external regions.
regions[i].remap = true;
}
unsigned short regIdGen = 0;
for (int i = 0; i < nreg; ++i)
{
if (!regions[i].remap)
continue;
unsigned short oldId = regions[i].id;
unsigned short newId = ++regIdGen;
for (int j = i; j < nreg; ++j)
{
if (regions[j].id == oldId)
{
regions[j].id = newId;
regions[j].remap = false;
}
}
}
maxRegionId = regIdGen;
// Remap regions.
for (int i = 0; i < chf.spanCount; ++i)
{
@ -1011,6 +1241,8 @@ static bool filterSmallRegions(rcContext* ctx, int minRegionArea, int mergeRegio
return true;
}
/// @par
///
/// This is usually the second to the last step in creating a fully built
@ -1256,13 +1488,17 @@ bool rcBuildRegionsMonotone(rcContext* ctx, rcCompactHeightfield& chf,
}
}
ctx->startTimer(RC_TIMER_BUILD_REGIONS_FILTER);
// Filter out small regions.
// Merge regions and filter out small regions.
rcIntArray overlaps;
chf.maxRegions = id;
if (!filterSmallRegions(ctx, minRegionArea, mergeRegionArea, chf.maxRegions, chf, srcReg))
if (!mergeAndFilterRegions(ctx, minRegionArea, mergeRegionArea, chf.maxRegions, chf, srcReg, overlaps))
return false;
// Monotone partitioning does not generate overlapping regions.
ctx->stopTimer(RC_TIMER_BUILD_REGIONS_FILTER);
// Store the result out.
@ -1407,11 +1643,18 @@ bool rcBuildRegions(rcContext* ctx, rcCompactHeightfield& chf,
ctx->startTimer(RC_TIMER_BUILD_REGIONS_FILTER);
// Filter out small regions.
// Merge regions and filter out small regions.
rcIntArray overlaps;
chf.maxRegions = regionId;
if (!filterSmallRegions(ctx, minRegionArea, mergeRegionArea, chf.maxRegions, chf, srcReg))
if (!mergeAndFilterRegions(ctx, minRegionArea, mergeRegionArea, chf.maxRegions, chf, srcReg, overlaps))
return false;
// If overlapping regions were found during merging, split those regions.
if (overlaps.size() > 0)
{
ctx->log(RC_LOG_ERROR, "rcBuildRegions: %d overlapping regions.", overlaps.size());
}
ctx->stopTimer(RC_TIMER_BUILD_REGIONS_FILTER);
// Write the result out.
@ -1424,3 +1667,157 @@ bool rcBuildRegions(rcContext* ctx, rcCompactHeightfield& chf,
}
bool rcBuildLayerRegions(rcContext* ctx, rcCompactHeightfield& chf,
const int borderSize, const int minRegionArea)
{
rcAssert(ctx);
ctx->startTimer(RC_TIMER_BUILD_REGIONS);
const int w = chf.width;
const int h = chf.height;
unsigned short id = 1;
rcScopedDelete<unsigned short> srcReg = (unsigned short*)rcAlloc(sizeof(unsigned short)*chf.spanCount, RC_ALLOC_TEMP);
if (!srcReg)
{
ctx->log(RC_LOG_ERROR, "rcBuildRegionsMonotone: Out of memory 'src' (%d).", chf.spanCount);
return false;
}
memset(srcReg,0,sizeof(unsigned short)*chf.spanCount);
const int nsweeps = rcMax(chf.width,chf.height);
rcScopedDelete<rcSweepSpan> sweeps = (rcSweepSpan*)rcAlloc(sizeof(rcSweepSpan)*nsweeps, RC_ALLOC_TEMP);
if (!sweeps)
{
ctx->log(RC_LOG_ERROR, "rcBuildRegionsMonotone: Out of memory 'sweeps' (%d).", nsweeps);
return false;
}
// Mark border regions.
if (borderSize > 0)
{
// Make sure border will not overflow.
const int bw = rcMin(w, borderSize);
const int bh = rcMin(h, borderSize);
// Paint regions
paintRectRegion(0, bw, 0, h, id|RC_BORDER_REG, chf, srcReg); id++;
paintRectRegion(w-bw, w, 0, h, id|RC_BORDER_REG, chf, srcReg); id++;
paintRectRegion(0, w, 0, bh, id|RC_BORDER_REG, chf, srcReg); id++;
paintRectRegion(0, w, h-bh, h, id|RC_BORDER_REG, chf, srcReg); id++;
chf.borderSize = borderSize;
}
rcIntArray prev(256);
// Sweep one line at a time.
for (int y = borderSize; y < h-borderSize; ++y)
{
// Collect spans from this row.
prev.resize(id+1);
memset(&prev[0],0,sizeof(int)*id);
unsigned short rid = 1;
for (int x = borderSize; x < w-borderSize; ++x)
{
const rcCompactCell& c = chf.cells[x+y*w];
for (int i = (int)c.index, ni = (int)(c.index+c.count); i < ni; ++i)
{
const rcCompactSpan& s = chf.spans[i];
if (chf.areas[i] == RC_NULL_AREA) continue;
// -x
unsigned short previd = 0;
if (rcGetCon(s, 0) != RC_NOT_CONNECTED)
{
const int ax = x + rcGetDirOffsetX(0);
const int ay = y + rcGetDirOffsetY(0);
const int ai = (int)chf.cells[ax+ay*w].index + rcGetCon(s, 0);
if ((srcReg[ai] & RC_BORDER_REG) == 0 && chf.areas[i] == chf.areas[ai])
previd = srcReg[ai];
}
if (!previd)
{
previd = rid++;
sweeps[previd].rid = previd;
sweeps[previd].ns = 0;
sweeps[previd].nei = 0;
}
// -y
if (rcGetCon(s,3) != RC_NOT_CONNECTED)
{
const int ax = x + rcGetDirOffsetX(3);
const int ay = y + rcGetDirOffsetY(3);
const int ai = (int)chf.cells[ax+ay*w].index + rcGetCon(s, 3);
if (srcReg[ai] && (srcReg[ai] & RC_BORDER_REG) == 0 && chf.areas[i] == chf.areas[ai])
{
unsigned short nr = srcReg[ai];
if (!sweeps[previd].nei || sweeps[previd].nei == nr)
{
sweeps[previd].nei = nr;
sweeps[previd].ns++;
prev[nr]++;
}
else
{
sweeps[previd].nei = RC_NULL_NEI;
}
}
}
srcReg[i] = previd;
}
}
// Create unique ID.
for (int i = 1; i < rid; ++i)
{
if (sweeps[i].nei != RC_NULL_NEI && sweeps[i].nei != 0 &&
prev[sweeps[i].nei] == (int)sweeps[i].ns)
{
sweeps[i].id = sweeps[i].nei;
}
else
{
sweeps[i].id = id++;
}
}
// Remap IDs
for (int x = borderSize; x < w-borderSize; ++x)
{
const rcCompactCell& c = chf.cells[x+y*w];
for (int i = (int)c.index, ni = (int)(c.index+c.count); i < ni; ++i)
{
if (srcReg[i] > 0 && srcReg[i] < rid)
srcReg[i] = sweeps[srcReg[i]].id;
}
}
}
ctx->startTimer(RC_TIMER_BUILD_REGIONS_FILTER);
// Merge monotone regions to layers and remove small regions.
rcIntArray overlaps;
chf.maxRegions = id;
if (!mergeAndFilterLayerRegions(ctx, minRegionArea, chf.maxRegions, chf, srcReg, overlaps))
return false;
ctx->stopTimer(RC_TIMER_BUILD_REGIONS_FILTER);
// Store the result out.
for (int i = 0; i < chf.spanCount; ++i)
chf.spans[i].reg = srcReg[i];
ctx->stopTimer(RC_TIMER_BUILD_REGIONS);
return true;
}

View File

@ -59,6 +59,13 @@ enum SamplePolyFlags
SAMPLE_POLYFLAGS_ALL = 0xffff // All abilities.
};
enum SamplePartitionType
{
SAMPLE_PARTITION_WATERSHED,
SAMPLE_PARTITION_MONOTONE,
SAMPLE_PARTITION_LAYERS,
};
struct SampleTool
{
virtual ~SampleTool() {}
@ -101,12 +108,12 @@ protected:
float m_agentMaxSlope;
float m_regionMinSize;
float m_regionMergeSize;
bool m_monotonePartitioning;
float m_edgeMaxLen;
float m_edgeMaxError;
float m_vertsPerPoly;
float m_detailSampleDist;
float m_detailSampleMaxError;
int m_partitionType;
SampleTool* m_tool;
SampleToolState* m_toolStates[MAX_TOOLS];

View File

@ -129,12 +129,12 @@ void Sample::resetCommonSettings()
m_agentMaxSlope = 45.0f;
m_regionMinSize = 8;
m_regionMergeSize = 20;
m_monotonePartitioning = false;
m_edgeMaxLen = 12.0f;
m_edgeMaxError = 1.3f;
m_vertsPerPoly = 6.0f;
m_detailSampleDist = 6.0f;
m_detailSampleMaxError = 1.0f;
m_partitionType = SAMPLE_PARTITION_WATERSHED;
}
void Sample::handleCommonSettings()
@ -165,8 +165,15 @@ void Sample::handleCommonSettings()
imguiLabel("Region");
imguiSlider("Min Region Size", &m_regionMinSize, 0.0f, 150.0f, 1.0f);
imguiSlider("Merged Region Size", &m_regionMergeSize, 0.0f, 150.0f, 1.0f);
if (imguiCheck("Monotore Partitioning", m_monotonePartitioning))
m_monotonePartitioning = !m_monotonePartitioning;
imguiSeparator();
imguiLabel("Partitioning");
if (imguiCheck("Watershed", m_partitionType == SAMPLE_PARTITION_WATERSHED))
m_partitionType = SAMPLE_PARTITION_WATERSHED;
if (imguiCheck("Monotone", m_partitionType == SAMPLE_PARTITION_MONOTONE))
m_partitionType = SAMPLE_PARTITION_MONOTONE;
if (imguiCheck("Layers", m_partitionType == SAMPLE_PARTITION_LAYERS))
m_partitionType = SAMPLE_PARTITION_LAYERS;
imguiSeparator();
imguiLabel("Polygonization");

View File

@ -494,18 +494,35 @@ bool Sample_SoloMesh::handleBuild()
const ConvexVolume* vols = m_geom->getConvexVolumes();
for (int i = 0; i < m_geom->getConvexVolumeCount(); ++i)
rcMarkConvexPolyArea(m_ctx, vols[i].verts, vols[i].nverts, vols[i].hmin, vols[i].hmax, (unsigned char)vols[i].area, *m_chf);
if (m_monotonePartitioning)
{
// Partition the walkable surface into simple regions without holes.
// Monotone partitioning does not need distancefield.
if (!rcBuildRegionsMonotone(m_ctx, *m_chf, 0, m_cfg.minRegionArea, m_cfg.mergeRegionArea))
{
m_ctx->log(RC_LOG_ERROR, "buildNavigation: Could not build regions.");
return false;
}
}
else
// Partition the heightfield so that we can use a simple algorithm later to triangulate the walkable areas.
// There are 3 partitioning methods, each with some pros and cons:
// 1) Watershed partitioning
// - the classic Recast partitioning
// - creates the nicest tessellation
// - usually slowest
// - partitions the heightfield into nice regions without holes or overlaps
// - there are some corner cases where this method produces holes and overlaps
// - holes may appear when a small obstacle is close to a large open area (triangulation can handle this)
// - overlaps may occur in narrow spiral corridors (i.e. stairs), which makes triangulation fail
// * generally the best choice if you precompute the navmesh; use this if you have large open areas
// 2) Monotone partitioning
// - fastest
// - partitions the heightfield into regions without holes and overlaps (guaranteed)
// - creates long thin polygons, which sometimes causes paths with detours
// * use this if you want fast navmesh generation
// 3) Layer partitioning
// - quite fast
// - partitions the heightfield into non-overlapping regions
// - relies on the triangulation code to cope with holes (thus slower than monotone partitioning)
// - produces better triangles than monotone partitioning
// - does not have the corner cases of watershed partitioning
// - can be slow and create a bit ugly tessellation (still better than monotone)
// if you have large open areas with small obstacles (not a problem if you use tiles)
// * good choice to use for tiled navmesh with medium and small sized tiles
if (m_partitionType == SAMPLE_PARTITION_WATERSHED)
{
// Prepare for region partitioning, by calculating distance field along the walkable surface.
if (!rcBuildDistanceField(m_ctx, *m_chf))
@ -513,15 +530,34 @@ bool Sample_SoloMesh::handleBuild()
m_ctx->log(RC_LOG_ERROR, "buildNavigation: Could not build distance field.");
return false;
}
// Partition the walkable surface into simple regions without holes.
if (!rcBuildRegions(m_ctx, *m_chf, 0, m_cfg.minRegionArea, m_cfg.mergeRegionArea))
{
m_ctx->log(RC_LOG_ERROR, "buildNavigation: Could not build regions.");
m_ctx->log(RC_LOG_ERROR, "buildNavigation: Could not build watershed regions.");
return false;
}
}
else if (m_partitionType == SAMPLE_PARTITION_MONOTONE)
{
// Partition the walkable surface into simple regions without holes.
// Monotone partitioning does not need distancefield.
if (!rcBuildRegionsMonotone(m_ctx, *m_chf, 0, m_cfg.minRegionArea, m_cfg.mergeRegionArea))
{
m_ctx->log(RC_LOG_ERROR, "buildNavigation: Could not build monotone regions.");
return false;
}
}
else // SAMPLE_PARTITION_LAYERS
{
// Partition the walkable surface into simple regions without holes.
if (!rcBuildLayerRegions(m_ctx, *m_chf, 0, m_cfg.minRegionArea))
{
m_ctx->log(RC_LOG_ERROR, "buildNavigation: Could not build layer regions.");
return false;
}
}
//
// Step 5. Trace and simplify region contours.
//

View File

@ -72,14 +72,12 @@ class NavMeshTileTool : public SampleTool
Sample_TileMesh* m_sample;
float m_hitPos[3];
bool m_hitPosSet;
float m_agentRadius;
public:
NavMeshTileTool() :
m_sample(0),
m_hitPosSet(false),
m_agentRadius(0)
m_hitPosSet(false)
{
m_hitPos[0] = m_hitPos[1] = m_hitPos[2] = 0;
}
@ -1048,32 +1046,69 @@ unsigned char* Sample_TileMesh::buildTileMesh(const int tx, const int ty, const
for (int i = 0; i < m_geom->getConvexVolumeCount(); ++i)
rcMarkConvexPolyArea(m_ctx, vols[i].verts, vols[i].nverts, vols[i].hmin, vols[i].hmax, (unsigned char)vols[i].area, *m_chf);
if (m_monotonePartitioning)
{
// Partition the walkable surface into simple regions without holes.
if (!rcBuildRegionsMonotone(m_ctx, *m_chf, m_cfg.borderSize, m_cfg.minRegionArea, m_cfg.mergeRegionArea))
{
m_ctx->log(RC_LOG_ERROR, "buildNavigation: Could not build regions.");
return 0;
}
}
else
// Partition the heightfield so that we can use a simple algorithm later to triangulate the walkable areas.
// There are 3 partitioning methods, each with some pros and cons:
// 1) Watershed partitioning
// - the classic Recast partitioning
// - creates the nicest tessellation
// - usually slowest
// - partitions the heightfield into nice regions without holes or overlaps
// - there are some corner cases where this method produces holes and overlaps
// - holes may appear when a small obstacle is close to a large open area (triangulation can handle this)
// - overlaps may occur in narrow spiral corridors (i.e. stairs), which makes triangulation fail
// * generally the best choice if you precompute the navmesh; use this if you have large open areas
// 2) Monotone partitioning
// - fastest
// - partitions the heightfield into regions without holes and overlaps (guaranteed)
// - creates long thin polygons, which sometimes causes paths with detours
// * use this if you want fast navmesh generation
// 3) Layer partitioning
// - quite fast
// - partitions the heightfield into non-overlapping regions
// - relies on the triangulation code to cope with holes (thus slower than monotone partitioning)
// - produces better triangles than monotone partitioning
// - does not have the corner cases of watershed partitioning
// - can be slow and create a bit ugly tessellation (still better than monotone)
// if you have large open areas with small obstacles (not a problem if you use tiles)
// * good choice to use for tiled navmesh with medium and small sized tiles
if (m_partitionType == SAMPLE_PARTITION_WATERSHED)
{
// Prepare for region partitioning, by calculating distance field along the walkable surface.
if (!rcBuildDistanceField(m_ctx, *m_chf))
{
m_ctx->log(RC_LOG_ERROR, "buildNavigation: Could not build distance field.");
return 0;
return false;
}
// Partition the walkable surface into simple regions without holes.
if (!rcBuildRegions(m_ctx, *m_chf, m_cfg.borderSize, m_cfg.minRegionArea, m_cfg.mergeRegionArea))
{
m_ctx->log(RC_LOG_ERROR, "buildNavigation: Could not build regions.");
return 0;
m_ctx->log(RC_LOG_ERROR, "buildNavigation: Could not build watershed regions.");
return false;
}
}
else if (m_partitionType == SAMPLE_PARTITION_MONOTONE)
{
// Partition the walkable surface into simple regions without holes.
// Monotone partitioning does not need distancefield.
if (!rcBuildRegionsMonotone(m_ctx, *m_chf, m_cfg.borderSize, m_cfg.minRegionArea, m_cfg.mergeRegionArea))
{
m_ctx->log(RC_LOG_ERROR, "buildNavigation: Could not build monotone regions.");
return false;
}
}
else // SAMPLE_PARTITION_LAYERS
{
// Partition the walkable surface into simple regions without holes.
if (!rcBuildLayerRegions(m_ctx, *m_chf, m_cfg.borderSize, m_cfg.minRegionArea))
{
m_ctx->log(RC_LOG_ERROR, "buildNavigation: Could not build layer regions.");
return false;
}
}
// Create contours.
m_cset = rcAllocContourSet();
if (!m_cset)