1 /**
2 Copyright: Copyright (c) 2014-2016 Andrey Penechko.
3 License: $(WEB boost.org/LICENSE_1_0.txt, Boost License 1.0).
4 Authors: Andrey Penechko.
5 */
6 module voxelman.storage.region;
7 
8 import std.experimental.logger;
9 import std.bitmanip : BitArray, nativeToBigEndian, bigEndianToNative;
10 import std.file : exists;
11 import std.path : isValidPath, dirSeparator;
12 import std.stdio : File, SEEK_END;
13 import std.string : format;
14 import dlib.math.vector : ivec3;
15 
16 import voxelman.core.config : TimestampType;
17 import voxelman.storage.coordinates;
18 
int ceiling_pos(float X) {return (X - cast(int)X) > 0 ? cast(int)(X + 1) : cast(int)X;}
// For negative values truncation toward zero already rounds up.
int ceiling_neg(float X) {return cast(int)X;}
int ceiling(float X) {return X > 0 ? ceiling_pos(X) : ceiling_neg(X);}
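
// A minimal compile-time check of the ceiling helper above (illustrative
// values only); it must be usable in CTFE because NUM_HEADER_SECTORS below
// is computed with it.
unittest
{
	static assert(ceiling(2.0f) == 2);
	static assert(ceiling(2.1f) == 3);
	static assert(ceiling(-2.1f) == -2);
}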
22 
23 enum REGION_SIZE = 16;
24 enum REGION_SIZE_SQR = REGION_SIZE * REGION_SIZE;
25 enum REGION_SIZE_CUBE = REGION_SIZE * REGION_SIZE * REGION_SIZE;
26 enum SECTOR_SIZE = 512;
27 
28 enum HEADER_SIZE = REGION_SIZE_CUBE * uint.sizeof + REGION_SIZE_CUBE * TimestampType.sizeof;
29 enum NUM_HEADER_SECTORS = ceiling(float(HEADER_SIZE) / SECTOR_SIZE);
30 enum CHUNK_HEADER_SIZE = uint.sizeof;
31 
// The per-chunk sector count is stored as a ubyte, so a chunk can use at most 255 sectors.
33 enum MAX_CHUNK_SECTORS = ubyte.max;
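
// A worked example of the on-disk size of a single chunk under the constants
// above (illustrative values): a 1000-byte payload plus the 4-byte length
// header occupies ceil(1004 / 512) = 2 sectors.
unittest
{
	enum size_t payloadLength = 1000;
	enum size_t sectorsNeeded =
		(payloadLength + CHUNK_HEADER_SIZE + SECTOR_SIZE - 1) / SECTOR_SIZE;
	static assert(sectorsNeeded == 2);
	static assert(sectorsNeeded <= MAX_CHUNK_SECTORS);
	// The header reserves whole sectors for the offset and timestamp tables.
	static assert(NUM_HEADER_SECTORS * SECTOR_SIZE >= HEADER_SIZE);
}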
34 
35 
36 private immutable ubyte[SECTOR_SIZE] emptySector;
37 
38 struct ChunkStoreInfo
39 {
40 	bool isStored;
41 	ChunkRegionPos positionInRegion;
42 	ChunkWorldPos positionInWorld;
43 	RegionWorldPos parentRegionPosition;
44 	ChunkRegionIndex headerIndex;
45 
	// The following fields are only valid when isStored == true.
47 	size_t sectorNumber;
48 	size_t numSectors;
49 	TimestampType timestamp;
50 	size_t dataLength;
51 	size_t dataByteOffset() @property {return sectorNumber * SECTOR_SIZE;}
52 
53 	string toString()
54 	{
55 		return format("position in region %s\nposition in world %s\n"~
56 			"parent region %s\nheader index %s\n"~
57 			"data length %s\nsector number %s\ndata offset %s",
58 			positionInRegion, positionInWorld, parentRegionPosition,
59 			headerIndex.index, dataLength, sectorNumber, dataByteOffset);
60 	}
61 }
62 
63 /**
64 	A storage for REGION_SIZE^3 offsets on a disk.
65 
66 	Format:
67 	Header consists of REGION_SIZE^3 records of 4 bytes.
68 	record = position << 8 | (size & 0xFF)
69 	where position is the number of first sector and size is the total number
70 	of sectors that are used by a chunk.
71 	Header is followed by sectors each of size SECTOR_SIZE. 4096
72 
73 	Region files are named like x_y_z.region.
74 */
75 struct Region
76 {
77 	private File file;
78 	private uint[REGION_SIZE_CUBE] offsets;
79 	private TimestampType[REGION_SIZE_CUBE] timestamps;
80 	// true if free, false if occupied.
81 	private BitArray sectors;
82 
83 	@disable this();
84 	this(string regionFilename)
85 	{
86 		openFile(regionFilename);
87 	}
88 
	/// Closes the underlying file handle.
	/// Renders the Region unusable after this.
91 	public void close()
92 	{
93 		file.close();
94 	}
95 
	/// Returns true if the chunk is present on disk.
	/// Positions are region-local, i.e. 0..REGION_SIZE.
98 	public bool isChunkOnDisk(ChunkRegionPos chunkPosition)
99 	{
100 		assert(isValidPosition(chunkPosition), format("Invalid position %s", chunkPosition));
101 		auto chunkIndex = ChunkRegionIndex(chunkPosition);
102 		return (offsets[chunkIndex] & 0xFF) != 0;
103 	}
104 
105 	public TimestampType chunkTimestamp(ChunkRegionPos chunkPosition)
106 	{
107 		assert(isValidPosition(chunkPosition), format("Invalid position %s", chunkPosition));
108 		auto chunkIndex = ChunkRegionIndex(chunkPosition);
109 		return timestamps[chunkIndex];
110 	}
111 
112 	public ChunkStoreInfo getChunkStoreInfo(ChunkRegionPos chunkPosition)
113 	{
114 		auto chunkIndex = ChunkRegionIndex(chunkPosition);
115 		auto sectorNumber = offsets[chunkIndex] >> 8;
116 		auto numSectors = offsets[chunkIndex] & 0xFF;
117 		auto timestamp = timestamps[chunkIndex];
118 
119 		ChunkStoreInfo res = ChunkStoreInfo(true, chunkPosition, ChunkWorldPos(),
120 			RegionWorldPos(), chunkIndex, sectorNumber, numSectors, timestamp);
121 		if (!isChunkOnDisk(chunkPosition))
122 		{
123 			res.isStored = false;
124 			return res;
125 		}
126 
127 		file.seek(sectorNumber * SECTOR_SIZE);
128 		ubyte[4] uintBuffer;
129 		file.rawRead(uintBuffer[]);
130 		res.dataLength = bigEndianToNative!uint(uintBuffer);
131 
132 		return res;
133 	}
134 
	/// Reads a chunk from the file.
	/// Positions are region-local, i.e. 0..REGION_SIZE.
	/// outBuffer should be big enough to hold a chunk of any size.
	/// Returns: a slice of outBuffer with the actual data, or null if the chunk
	/// was not previously stored on disk.
141 	public ubyte[] readChunk(ChunkRegionPos chunkPosition, ubyte[] outBuffer, out TimestampType timestamp)
142 	{
143 		assert(isValidPosition(chunkPosition), format("Invalid position %s", chunkPosition));
144 		if (!isChunkOnDisk(chunkPosition)) return null;
145 
146 		auto chunkIndex = ChunkRegionIndex(chunkPosition);
147 		auto sectorNumber = offsets[chunkIndex] >> 8;
148 		auto numSectors = offsets[chunkIndex] & 0xFF;
149 
		// The chunk's sectors lie past EOF.
151 		if (sectorNumber + numSectors > sectors.length)
152 		{
153 			errorf("Invalid sector chunk %s, sector %s, numSectors %s while total sectors %s",
154 				chunkPosition, sectorNumber, numSectors, sectors.length);
155 			errorf("Erasing chunk");
156 			eraseChunk(chunkIndex);
157 			return null;
158 		}
159 
160 		file.seek(sectorNumber * SECTOR_SIZE);
161 		ubyte[4] uintBuffer;
162 		file.rawRead(uintBuffer[]);
163 		uint dataLength = bigEndianToNative!uint(uintBuffer);
164 		//infof("read data length %s BE %(%02x%)", dataLength, uintBuffer[]);
165 
166 		if (dataLength > numSectors * SECTOR_SIZE)
167 		{
168 			errorf("Invalid data length %s, %s > %s * %s",
169 				chunkPosition, dataLength, numSectors, SECTOR_SIZE);
170 			errorf("Erasing chunk");
171 			eraseChunk(chunkIndex);
172 			return null;
173 		}
174 
175 		timestamp = timestamps[chunkIndex];
176 		return file.rawRead(outBuffer[0..dataLength]);
177 	}
178 
	/// Writes the chunk at chunkPosition with blockData to disk.
	/// Positions are region-local, i.e. 0..REGION_SIZE.
181 	public void writeChunk(ChunkRegionPos chunkPosition, in ubyte[] blockData, TimestampType timestamp)
182 	{
183 		assert(isValidPosition(chunkPosition), format("Invalid position %s", chunkPosition));
184 
185 		auto chunkIndex = ChunkRegionIndex(chunkPosition);
186 		auto sectorNumber = offsets[chunkIndex] >> 8;
187 		auto numSectors = offsets[chunkIndex] & 0xFF;
188 
189 		import std.math : ceil;
190 		auto sectorsNeeded = cast(size_t)ceil(
191 			cast(float)(blockData.length + CHUNK_HEADER_SIZE) / SECTOR_SIZE);
192 
193 		if (sectorsNeeded > MAX_CHUNK_SECTORS)
194 		{
195 			errorf("data length %s is too big", blockData.length);
196 			return;
197 		}
198 
199 		if (sectorNumber != 0 && numSectors == sectorsNeeded)
200 		{
201 			// Rewrite data in place.
202 			writeChunkData(sectorNumber, blockData);
203 			return;
204 		}
205 		//infof("searching for free sectors");
206 
207 		// Mark used sectors as free.
208 		foreach(i; sectorNumber..sectorNumber + numSectors)
209 			sectors[i] = true;
210 
		uint numFreeSectors = 0;
		size_t firstFreeSector = 0;
		// Find a contiguous run of free sectors that is big enough.
		foreach(sectorIndex; sectors.bitsSet)
		{
			//infof("sectorIndex %s", sectorIndex);

			if (numFreeSectors > 0)
			{
				if (sectorIndex == firstFreeSector + numFreeSectors)
				{
					// Contiguous with the current run, extend it.
					++numFreeSectors;
				}
				else
				{
					// A gap was found, start a new run at this sector.
					firstFreeSector = sectorIndex;
					numFreeSectors = 1;
				}
			}
			else
			{
				firstFreeSector = sectorIndex;
				numFreeSectors = 1;
			}

			if (numFreeSectors >= sectorsNeeded) break;
		}
231 		//infof("first %s num %s", firstFreeSector, numFreeSectors);
232 
		if (numFreeSectors < sectorsNeeded)
		{
			// No big enough run was found, so the file has to grow.
			// If the found run does not reach the end of the file, we cannot
			// reuse it and must append entirely new sectors.
			if (firstFreeSector + numFreeSectors < sectors.length)
			{
				firstFreeSector = sectors.length;
				numFreeSectors = 0;
			}

			// If there are free sectors at the end, reuse them and grow only by the shortfall.
			sectors.length = sectors.length + sectorsNeeded - numFreeSectors;
		}
245 
246 		//infof("first %s num %s", firstFreeSector, numFreeSectors);
		// Write the chunk data into the chosen sectors.
248 		writeChunkData(cast(uint)firstFreeSector, blockData);
249 
250 		setChunkOffset(chunkIndex, cast(uint)firstFreeSector, cast(ubyte)sectorsNeeded);
251 		setChunkTimestamp(chunkIndex, timestamp);
252 
253 		foreach(i; firstFreeSector..firstFreeSector + sectorsNeeded)
254 			sectors[i] = false;
255 
		// Pad the last sector in case we appended to the file.
257 		fixPadding();
258 	}
259 
260 	private bool isValidPosition(ChunkRegionPos chunkPosition)
261 	{
262 		return !(chunkPosition.x < 0 || chunkPosition.x >= REGION_SIZE ||
263 				chunkPosition.y < 0 || chunkPosition.y >= REGION_SIZE ||
264 				chunkPosition.z < 0 || chunkPosition.z >= REGION_SIZE);
265 	}
266 
267 	private void openFile(string regionFilename)
268 	{
269 		assert(isValidPath(regionFilename));
270 
271 		if (!exists(regionFilename))
272 		{
273 			//trace("write header");
274 			file.open(regionFilename, "wb+");
275 
			// Write the chunk offset table. The timestamp table that follows it
			// is filled in lazily by setChunkTimestamp as chunks are written.
277 			foreach(_; 0..(REGION_SIZE_CUBE*uint.sizeof) / SECTOR_SIZE)
278 				file.rawWrite(emptySector);
279 
280 			sectors.length = NUM_HEADER_SECTORS;
281 			//tracef("sectors %b", sectors);
282 			return;
283 		}
284 
285 		//trace("read header");
286 		file.open(regionFilename, "rb+");
		file.rawRead(offsets[]);
		file.rawRead(timestamps[]);

		// If the file size is not a multiple of SECTOR_SIZE, pad it.
		fixPadding();

		version(LittleEndian)
		{
			foreach(ref uint item; offsets)
				item = bigEndianToNative!uint(*cast(ubyte[4]*)&item);
			foreach(ref TimestampType item; timestamps)
				item = bigEndianToNative!TimestampType(*cast(ubyte[TimestampType.sizeof]*)&item);
		}
295 
296 		sectors.length = cast(size_t)(file.size / SECTOR_SIZE);
297 		// Mark all data sectors as free.
298 		foreach(i; NUM_HEADER_SECTORS..sectors.length)
299 			sectors[i] = true;
300 
301 		foreach(i, offset; offsets)
302 		{
303 			auto sectorNumber = offset >> 8;
304 			auto numSectors = offset & 0xFF;
305 			// If chunk is stored on disk and is valid.
306 			if (offset != 0 && (sectorNumber + numSectors) <= sectors.length)
307 			{
308 				// Mark sectors as occupied.
309 				foreach(sector; sectorNumber..sectorNumber + numSectors)
310 					sectors[sector] = false;
311 			}
312 		}
313 
314 		//tracef("sectors %b", sectors);
315 	}
316 
317 	private void writeChunkData(uint sectorNumber, in ubyte[] data)
318 	{
319 		file.seek(sectorNumber * SECTOR_SIZE);
320 		//tracef("write data length %s BE %(%02x%), sector %s",
321 		//	data.length, nativeToBigEndian(cast(uint)data.length), sectorNumber);
322 		file.rawWrite(nativeToBigEndian(cast(uint)data.length));
323 		file.rawWrite(data);
324 	}
325 
326 	private void setChunkOffset(ChunkRegionIndex chunkIndex, uint position, ubyte size)
327 	{
328 		uint offset = (position << 8) | size;
329 		offsets[chunkIndex] = offset;
330 		file.seek(chunkIndex * uint.sizeof);
331 		file.rawWrite(nativeToBigEndian(offset));
332 	}
333 
334 	private void setChunkTimestamp(ChunkRegionIndex chunkIndex, TimestampType timestamp)
335 	{
336 		timestamps[chunkIndex] = timestamp;
337 		file.seek(REGION_SIZE_CUBE * uint.sizeof + chunkIndex * TimestampType.sizeof);
338 		file.rawWrite(nativeToBigEndian(timestamp));
339 	}
340 
341 	private void eraseChunk(ChunkRegionIndex chunkIndex)
342 	{
343 		setChunkTimestamp(chunkIndex, 0);
344 		setChunkOffset(chunkIndex, 0, 0);
345 	}
346 
347 	private void fixPadding()
348 	{
349 		auto lastSectorSize = file.size & (SECTOR_SIZE - 1);
350 		if (lastSectorSize != 0)
351 		{
352 			file.seek(0, SEEK_END);
353 			ubyte[1] emptyByte;
354 			foreach(_; 0..SECTOR_SIZE - lastSectorSize)
355 				file.rawWrite(emptyByte);
356 		}
357 	}
358 }