/**
 * compress.c - Compressed attribute handling code.  Originated from the
 * Linux-NTFS project.
 *
 * Copyright (c) 2004-2005 Anton Altaparmakov
 * Copyright (c) 2004-2006 Szabolcs Szakacsits
 * Copyright (c) 2005 Yura Pakhuchiy
 * Copyright (c) 2009-2014 Jean-Pierre Andre
 * Copyright (c) 2014 Eric Biggers
 *
 * This program/include file is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program/include file is distributed in the hope that it will be
 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program (in the main directory of the NTFS-3G
 * distribution in the file COPYING); if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#ifdef HAVE_STDIO_H
#include <stdio.h>
#endif
#ifdef HAVE_STRING_H
#include <string.h>
#endif
#ifdef HAVE_STDLIB_H
#include <stdlib.h>
#endif
#ifdef HAVE_ERRNO_H
#include <errno.h>
#endif

#include "attrib.h"
#include "debug.h"
#include "volume.h"
#include "types.h"
#include "layout.h"
#include "runlist.h"
#include "compress.h"
#include "lcnalloc.h"
#include "logging.h"
#include "misc.h"

#undef le16_to_cpup
/* the standard le16_to_cpup() crashes for unaligned data on some processors */
#define le16_to_cpup(p) (*(u8*)(p) + (((u8*)(p))[1] << 8))

/**
 * enum ntfs_compression_constants - constants used in the compression code
 */
typedef enum {
	/* Token types and access mask. */
	NTFS_SYMBOL_TOKEN = 0,
	NTFS_PHRASE_TOKEN = 1,
	NTFS_TOKEN_MASK = 1,

	/* Compression sub-block constants. */
	NTFS_SB_SIZE_MASK = 0x0fff,
	NTFS_SB_SIZE = 0x1000,
	NTFS_SB_IS_COMPRESSED = 0x8000,
} ntfs_compression_constants;
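
/*
 * Illustrative sketch (not part of the original code): how the two-byte
 * sub-block header written by ntfs_compress_block() below is interpreted.
 * The low 12 bits (NTFS_SB_SIZE_MASK) hold the sub-block size minus 3,
 * counting the header itself, and NTFS_SB_IS_COMPRESSED flags a compressed
 * sub-block.  The helper name is hypothetical and unused elsewhere.
 */
static inline unsigned int ntfs_sb_example_size(u16 sb_header)
{
	/* Total on-disk size of the sub-block, header included.  For an
	 * uncompressed full sub-block (header 0x3fff) this yields
	 * 0xfff + 3 == NTFS_SB_SIZE + 2. */
	return (sb_header & NTFS_SB_SIZE_MASK) + 3;
}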

/* Match length at or above which ntfs_best_match() will stop searching for
 * longer matches. */
#define NICE_MATCH_LEN 18

/* Maximum number of potential matches that ntfs_best_match() will consider
 * at each position. */
#define MAX_SEARCH_DEPTH 24

/* log base 2 of the number of entries in the hash table for match-finding. */
#define HASH_SHIFT 14

/* Constant for the multiplicative hash function. */
#define HASH_MULTIPLIER 0x1E35A7BD

struct COMPRESS_CONTEXT {
	const unsigned char *inbuf;
	int bufsize;
	int size;
	int rel;
	int mxsz;
	s16 head[1 << HASH_SHIFT];
	s16 prev[NTFS_SB_SIZE];
};

/*
 * Hash the next 3-byte sequence in the input buffer
 */
static inline unsigned int ntfs_hash(const u8 *p)
{
	u32 str;
	u32 hash;

#if defined(__i386__) || defined(__x86_64__)
	/* Unaligned access allowed, and little endian CPU.
	 * Callers ensure that at least 4 (not 3) bytes are remaining. */
	str = *(const u32 *)p & 0xFFFFFF;
#else
	str = ((u32)p[0] << 0) | ((u32)p[1] << 8) | ((u32)p[2] << 16);
#endif

	hash = str * HASH_MULTIPLIER;

	/* High bits are more random than the low bits. */
	return hash >> (32 - HASH_SHIFT);
}

/*
 * Search for the longest sequence matching the current position
 *
 * A hash table, each entry of which points to a chain of sequence
 * positions sharing the corresponding hash code, is maintained to speed up
 * searching for matches.  To maintain the hash table, either
 * ntfs_best_match() or ntfs_skip_position() has to be called for each
 * consecutive position.
 *
 * This function is heavily used; it has to be optimized carefully.
 *
 * This function sets pctx->size and pctx->rel to the length and offset,
 * respectively, of the longest match found.
 *
 * The minimum match length is assumed to be 3, and the maximum match
 * length is assumed to be pctx->mxsz.  If this function produces
 * pctx->size < 3, then no match was found.
 *
 * Note: for the following reasons, this function is not guaranteed to find
 * *the* longest match up to pctx->mxsz:
 *
 * (1) If this function finds a match of NICE_MATCH_LEN bytes or greater,
 *     it ends early because a match this long is good enough and it's not
 *     worth spending more time searching.
 *
 * (2) If this function considers MAX_SEARCH_DEPTH matches at a single
 *     position, it ends early and returns the longest match found so far.
 *     This saves a lot of time on degenerate inputs.
 */
static void ntfs_best_match(struct COMPRESS_CONTEXT *pctx, const int i,
			int best_len)
{
	const u8 * const inbuf = pctx->inbuf;
	const u8 * const strptr = &inbuf[i]; /* String we're matching against */
	s16 * const prev = pctx->prev;
	const int max_len = min(pctx->bufsize - i, pctx->mxsz);
	const int nice_len = min(NICE_MATCH_LEN, max_len);
	int depth_remaining = MAX_SEARCH_DEPTH;
	const u8 *best_matchptr = strptr;
	unsigned int hash;
	s16 cur_match;
	const u8 *matchptr;
	int len;

	if (max_len < 4)
		goto out;

	/* Insert the current sequence into the appropriate hash chain. */
	hash = ntfs_hash(strptr);
	cur_match = pctx->head[hash];
	prev[i] = cur_match;
	pctx->head[hash] = i;

	if (best_len >= max_len) {
		/* Lazy match is being attempted, but there aren't enough
		 * length bits remaining to code a longer match. */
		goto out;
	}

	/* Search the appropriate hash chain for matches. */

	for (; cur_match >= 0 && depth_remaining--;
			cur_match = prev[cur_match])
	{
		matchptr = &inbuf[cur_match];

		/* Considering the potential match at 'matchptr': is it longer
		 * than 'best_len'?
		 *
		 * The bytes at index 'best_len' are the most likely to differ,
		 * so check them first.
		 *
		 * The bytes at indices 'best_len - 1' and '0' are less
		 * important to check separately.  But doing so still gives a
		 * slight performance improvement, at least on x86_64, probably
		 * because they create separate branches for the CPU to predict
		 * independently of the branches in the main comparison loops.
		 */
		if (matchptr[best_len] != strptr[best_len] ||
		    matchptr[best_len - 1] != strptr[best_len - 1] ||
		    matchptr[0] != strptr[0])
			goto next_match;

		for (len = 1; len < best_len - 1; len++)
			if (matchptr[len] != strptr[len])
				goto next_match;

		/* The match is the longest found so far ---
		 * at least 'best_len' + 1 bytes.  Continue extending it. */

		best_matchptr = matchptr;

		do {
			if (++best_len >= nice_len) {
				/* 'nice_len' reached; don't waste time
				 * searching for longer matches.  Extend the
				 * match as far as possible and terminate the
				 * search. */
				while (best_len < max_len &&
					(best_matchptr[best_len] ==
						strptr[best_len]))
				{
					best_len++;
				}
				goto out;
			}
		} while (best_matchptr[best_len] == strptr[best_len]);

		/* Found a longer match, but 'nice_len' not yet reached. */

	next_match:
		/* Continue to next match in the chain. */
		;
	}

	/* Reached end of chain, or ended early due to reaching the maximum
	 * search depth. */

out:
	/* Return the longest match we were able to find. */
	pctx->size = best_len;
	pctx->rel = best_matchptr - strptr; /* given as a negative number! */
}

/*
 * Advance the match-finder, but don't search for matches.
 */
static void ntfs_skip_position(struct COMPRESS_CONTEXT *pctx, const int i)
{
	unsigned int hash;

	if (pctx->bufsize - i < 4)
		return;

	/* Insert the current sequence into the appropriate hash chain. */
	hash = ntfs_hash(pctx->inbuf + i);
	pctx->prev[i] = pctx->head[hash];
	pctx->head[hash] = i;
}
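
/*
 * Minimal usage sketch (illustrative, not part of the original code): every
 * input position must pass through the match-finder exactly once, either by
 * searching at it (ntfs_best_match) or by merely indexing it
 * (ntfs_skip_position).  The function name and setup below are hypothetical;
 * ntfs_compress_block() performs the real equivalent.
 */
static void ntfs_matchfinder_example(struct COMPRESS_CONTEXT *pctx,
			const unsigned char *inbuf, int bufsize)
{
	int i;

	/* All hash chains start empty ('-1' terminated). */
	memset(pctx->head, 0xFF, sizeof(pctx->head));
	pctx->inbuf = inbuf;
	pctx->bufsize = bufsize;
	pctx->mxsz = (1 << 12) + 2;	/* maximum match length when bp == 4 */

	/* Search for a match at position 0... */
	ntfs_best_match(pctx, 0, 2);
	if (pctx->size >= 3) {
		/* ...and if one was found, feed the remaining matched
		 * positions to the hash chains without searching. */
		for (i = 1; i < pctx->size; i++)
			ntfs_skip_position(pctx, i);
	}
}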

/*
 * Compress a 4096-byte block
 *
 * Returns a header of two bytes followed by the compressed data.
 * If compression is not effective, the header and an uncompressed
 * block are returned.
 *
 * Note : two bytes may be output before output buffer overflow
 * is detected, so a 4100-byte output buffer must be reserved.
 *
 * Returns the size of the compressed block, including the
 * header (minimum size is 2, maximum size is 4098),
 * or 0 if an error occurred.
 */

static unsigned int ntfs_compress_block(const char *inbuf, const int bufsize,
			char *outbuf)
{
	struct COMPRESS_CONTEXT *pctx;
	int i; /* current position */
	int j; /* end of best match from current position */
	int k; /* end of best match from next position */
	int offs; /* offset to best match */
	int bp; /* bits to store offset */
	int bp_cur; /* saved bits to store offset at current position */
	int mxoff; /* max match offset : 1 << bp */
	unsigned int xout;
	unsigned int q; /* aggregated offset and size */
	int have_match; /* do we have a match at the current position? */
	char *ptag; /* location reserved for a tag */
	int tag; /* current value of tag */
	int ntag; /* count of bits still undefined in tag */

	pctx = ntfs_malloc(sizeof(struct COMPRESS_CONTEXT));
	if (!pctx) {
		errno = ENOMEM;
		return 0;
	}

	/* All hash chains start as empty.  The special value '-1' indicates
	 * the end of each hash chain. */
	memset(pctx->head, 0xFF, sizeof(pctx->head));

	pctx->inbuf = (const unsigned char*)inbuf;
	pctx->bufsize = bufsize;
	xout = 2;
	i = 0;
	bp = 4;
	mxoff = 1 << bp;
	pctx->mxsz = (1 << (16 - bp)) + 2;
	have_match = 0;
	tag = 0;
	ntag = 8;
	ptag = &outbuf[xout++];

	while ((i < bufsize) && (xout < (NTFS_SB_SIZE + 2))) {

		/* This implementation uses "lazy" parsing: it always chooses
		 * the longest match, unless the match at the next position is
		 * longer.  This is the same strategy used by the high
		 * compression modes of zlib. */

		if (!have_match) {
			/* Find the longest match at the current position.  But
			 * first adjust the maximum match length if needed.
			 * (This loop might need to run more than one time in
			 * the case that we just output a long match.) */
			while (mxoff < i) {
				bp++;
				mxoff <<= 1;
				pctx->mxsz = (pctx->mxsz + 2) >> 1;
			}
			ntfs_best_match(pctx, i, 2);
		}

		if (pctx->size >= 3) {

			/* Found a match at the current position. */

			j = i + pctx->size;
			bp_cur = bp;
			offs = pctx->rel;

			if (pctx->size >= NICE_MATCH_LEN) {

				/* Choose long matches immediately. */

				q = (~offs << (16 - bp_cur)) + (j - i - 3);
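				/*
				 * Illustrative note (not in the original):
				 * offs is negative, so ~offs == -offs - 1 is
				 * the back-distance minus one.  E.g. a match
				 * 4 bytes back with bp_cur == 4 gives
				 * q == (3 << 12) + (length - 3); the
				 * decompressor below recovers the distance as
				 * (q >> 12) + 1 == 4 and the length as
				 * (q & 0xfff) + 3.
				 */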
				outbuf[xout++] = q & 255;
				outbuf[xout++] = (q >> 8) & 255;
				tag |= (1 << (8 - ntag));

				if (j == bufsize) {
					/* Shortcut if the match extends to the
					 * end of the buffer. */
					i = j;
					--ntag;
					break;
				}
				i += 1;
				do {
					ntfs_skip_position(pctx, i);
				} while (++i != j);
				have_match = 0;
			} else {
				/* Check for a longer match at the next
				 * position. */

				/* Doesn't need to be while() since we just
				 * adjusted the maximum match length at the
				 * previous position. */
				if (mxoff < i + 1) {
					bp++;
					mxoff <<= 1;
					pctx->mxsz = (pctx->mxsz + 2) >> 1;
				}
				ntfs_best_match(pctx, i + 1, pctx->size);
				k = i + 1 + pctx->size;

				if (k > (j + 1)) {
					/* Next match is longer.
					 * Output a literal. */
					outbuf[xout++] = inbuf[i++];
					have_match = 1;
				} else {
					/* Next match isn't longer.
					 * Output the current match. */
					q = (~offs << (16 - bp_cur)) +
							(j - i - 3);
					outbuf[xout++] = q & 255;
					outbuf[xout++] = (q >> 8) & 255;
					tag |= (1 << (8 - ntag));

					/* The minimum match length is 3, and
					 * we've run two bytes through the
					 * matchfinder already.  So the minimum
					 * number of positions we need to skip
					 * is 1. */
					i += 2;
					do {
						ntfs_skip_position(pctx, i);
					} while (++i != j);
					have_match = 0;
				}
			}
		} else {
			/* No match at current position.  Output a literal. */
			outbuf[xout++] = inbuf[i++];
			have_match = 0;
		}

		/* Store the tag if fully used. */
		if (!--ntag) {
			*ptag = tag;
			ntag = 8;
			ptag = &outbuf[xout++];
			tag = 0;
		}
	}

	/* Store the last tag if partially used. */
	if (ntag == 8)
		xout--;
	else
		*ptag = tag;

	/* Determine whether to store the data compressed or uncompressed. */

	if ((i >= bufsize) && (xout < (NTFS_SB_SIZE + 2))) {
		/* Compressed. */
		outbuf[0] = (xout - 3) & 255;
		outbuf[1] = 0xb0 + (((xout - 3) >> 8) & 15);
	} else {
		/* Uncompressed. */
		memcpy(&outbuf[2], inbuf, bufsize);
		if (bufsize < NTFS_SB_SIZE)
			memset(&outbuf[bufsize + 2], 0, NTFS_SB_SIZE - bufsize);
		outbuf[0] = 0xff;
		outbuf[1] = 0x3f;
		xout = NTFS_SB_SIZE + 2;
	}

	/* Free the compression context and return the total number of bytes
	 * written to 'outbuf'. */
	free(pctx);
	return (xout);
}
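
/*
 * Usage sketch (illustrative, not part of the original code): compressing one
 * full block.  Per the note above, the output buffer needs NTFS_SB_SIZE + 4
 * bytes (4100) even though at most NTFS_SB_SIZE + 2 bytes are returned,
 * because two bytes may be written before overflow is detected.  The wrapper
 * name is hypothetical.
 */
static int ntfs_compress_block_example(const char *in4096, char *out4100)
{
	unsigned int outsz;

	outsz = ntfs_compress_block(in4096, NTFS_SB_SIZE, out4100);
	if (!outsz)
		return -1;	/* errno set by ntfs_compress_block() */
	/* outsz is in [2, NTFS_SB_SIZE + 2], header included. */
	return (int)outsz;
}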

/**
 * ntfs_decompress - decompress a compression block into an array of pages
 * @dest:	buffer to which to write the decompressed data
 * @dest_size:	size of buffer @dest in bytes
 * @cb_start:	compression block to decompress
 * @cb_size:	size of compression block @cb_start in bytes
 *
 * This decompresses the compression block @cb_start into the destination
 * buffer @dest.
 *
 * @cb_start is a pointer to the compression block which needs decompressing
 * and @cb_size is the size of @cb_start in bytes (8-64kiB).
 *
 * Return 0 on success, or -1 with errno set to EOVERFLOW on error in the
 * compressed stream.
 */
static int ntfs_decompress(u8 *dest, const u32 dest_size,
		u8 *const cb_start, const u32 cb_size)
{
	/*
	 * Pointers into the compressed data, i.e. the compression block (cb),
	 * and the therein contained sub-blocks (sb).
	 */
	u8 *cb_end = cb_start + cb_size; /* End of cb. */
	u8 *cb = cb_start; /* Current position in cb. */
	u8 *cb_sb_start = cb; /* Beginning of the current sb in the cb. */
	u8 *cb_sb_end; /* End of current sb / beginning of next sb. */
	/* Variables for uncompressed data / destination. */
	u8 *dest_end = dest + dest_size; /* End of dest buffer. */
	u8 *dest_sb_start; /* Start of current sub-block in dest. */
	u8 *dest_sb_end; /* End of current sb in dest. */
	/* Variables for tag and token parsing. */
	u8 tag; /* Current tag. */
	int token; /* Loop counter for the eight tokens in tag. */

	ntfs_log_trace("Entering, cb_size = 0x%x.\n", (unsigned)cb_size);
do_next_sb:
	ntfs_log_debug("Beginning sub-block at offset = %d in the cb.\n",
			(int)(cb - cb_start));
	/*
	 * Have we reached the end of the compression block or the end of the
	 * decompressed data?  The latter can happen for example if the current
	 * position in the compression block is one byte before its end so the
	 * first two checks do not detect it.
	 */
	if (cb == cb_end || !le16_to_cpup((le16*)cb) || dest == dest_end) {
		ntfs_log_debug("Completed. Returning success (0).\n");
		return 0;
	}
	/* Setup offset for the current sub-block destination. */
	dest_sb_start = dest;
	dest_sb_end = dest + NTFS_SB_SIZE;
	/* Check that we are still within allowed boundaries. */
	if (dest_sb_end > dest_end)
		goto return_overflow;
	/* Does the minimum size of a compressed sb overflow valid range? */
	if (cb + 6 > cb_end)
		goto return_overflow;
	/* Setup the current sub-block source pointers and validate range. */
	cb_sb_start = cb;
	cb_sb_end = cb_sb_start + (le16_to_cpup((le16*)cb) & NTFS_SB_SIZE_MASK)
			+ 3;
	if (cb_sb_end > cb_end)
		goto return_overflow;
	/* Now, we are ready to process the current sub-block (sb). */
	if (!(le16_to_cpup((le16*)cb) & NTFS_SB_IS_COMPRESSED)) {
		ntfs_log_debug("Found uncompressed sub-block.\n");
		/* This sb is not compressed, just copy it into destination. */
		/* Advance source position to first data byte. */
		cb += 2;
		/* An uncompressed sb must be full size. */
		if (cb_sb_end - cb != NTFS_SB_SIZE)
			goto return_overflow;
		/* Copy the block and advance the source position. */
		memcpy(dest, cb, NTFS_SB_SIZE);
		cb += NTFS_SB_SIZE;
		/* Advance destination position to next sub-block. */
		dest += NTFS_SB_SIZE;
		goto do_next_sb;
	}
	ntfs_log_debug("Found compressed sub-block.\n");
	/* This sb is compressed, decompress it into destination. */
	/* Forward to the first tag in the sub-block. */
	cb += 2;
do_next_tag:
	if (cb == cb_sb_end) {
		/* Check if the decompressed sub-block was not full-length. */
		if (dest < dest_sb_end) {
			int nr_bytes = dest_sb_end - dest;

			ntfs_log_debug("Filling incomplete sub-block with zeroes.\n");
			/* Zero remainder and update destination position. */
			memset(dest, 0, nr_bytes);
			dest += nr_bytes;
		}
		/* We have finished the current sub-block. */
		goto do_next_sb;
	}
	/* Check we are still in range. */
	if (cb > cb_sb_end || dest > dest_sb_end)
		goto return_overflow;
	/* Get the next tag and advance to first token. */
	tag = *cb++;
	/* Parse the eight tokens described by the tag. */
	for (token = 0; token < 8; token++, tag >>= 1) {
		u16 lg, pt, length, max_non_overlap;
		register u16 i;
		u8 *dest_back_addr;

		/* Check if we are done / still in range. */
		if (cb >= cb_sb_end || dest > dest_sb_end)
			break;
		/* Determine token type and parse appropriately. */
		if ((tag & NTFS_TOKEN_MASK) == NTFS_SYMBOL_TOKEN) {
			/*
			 * We have a symbol token, copy the symbol across, and
			 * advance the source and destination positions.
			 */
			*dest++ = *cb++;
			/* Continue with the next token. */
			continue;
		}
		/*
		 * We have a phrase token.  Make sure it is not the first tag
		 * in the sb as this is illegal and would confuse the code
		 * below.
		 */
		if (dest == dest_sb_start)
			goto return_overflow;
		/*
		 * Determine the number of bytes to go back (p) and the number
		 * of bytes to copy (l).  We use an optimized algorithm in
		 * which we first calculate log2(current destination position
		 * in sb), which allows determination of l and p in O(1)
		 * rather than O(n).  We just need an arch-optimized log2()
		 * function now.
		 */
		lg = 0;
		for (i = dest - dest_sb_start - 1; i >= 0x10; i >>= 1)
			lg++;
		/* Get the phrase token into i. */
		pt = le16_to_cpup((le16*)cb);
		/*
		 * Calculate starting position of the byte sequence in
		 * the destination using the fact that p = (pt >> (12 - lg))
		 * + 1 and make sure we don't go too far back.
		 */
		dest_back_addr = dest - (pt >> (12 - lg)) - 1;
		if (dest_back_addr < dest_sb_start)
			goto return_overflow;
		/* Now calculate the length of the byte sequence. */
		length = (pt & (0xfff >> lg)) + 3;
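		/*
		 * Worked example (illustrative, not in the original): with
		 * the destination 512 bytes into the sub-block, lg ends up
		 * as 5, so the token splits into a 9-bit back-offset and a
		 * 7-bit length.  A token pt == 0x0c05 then gives
		 * p = (0x0c05 >> 7) + 1 = 25 and l = (0x0c05 & 0x7f) + 3 = 8.
		 */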
		/* Verify destination is in range. */
		if (dest + length > dest_sb_end)
			goto return_overflow;
		/* The number of non-overlapping bytes. */
		max_non_overlap = dest - dest_back_addr;
		if (length <= max_non_overlap) {
			/* The byte sequence doesn't overlap, just copy it. */
			memcpy(dest, dest_back_addr, length);
			/* Advance destination pointer. */
			dest += length;
		} else {
			/*
			 * The byte sequence does overlap, copy non-overlapping
			 * part and then do a slow byte by byte copy for the
			 * overlapping part.  Also, advance the destination
			 * pointer.
			 */
			memcpy(dest, dest_back_addr, max_non_overlap);
			dest += max_non_overlap;
			dest_back_addr += max_non_overlap;
			length -= max_non_overlap;
			while (length--)
				*dest++ = *dest_back_addr++;
		}
		/* Advance source position and continue with the next token. */
		cb += 2;
	}
	/* No tokens left in the current tag.  Continue with the next tag. */
	goto do_next_tag;
return_overflow:
	errno = EOVERFLOW;
	ntfs_log_perror("Failed to decompress file");
	return -1;
}
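
/*
 * Round-trip sketch (illustrative, not part of the original code): a single
 * sub-block emitted by ntfs_compress_block() forms a valid one-sub-block
 * compression block, since ntfs_decompress() stops cleanly when it reaches
 * cb_end.  The function name and buffer sizes are hypothetical;
 * ntfs_compressed_attr_pread() below shows the real read path.
 */
static int ntfs_roundtrip_example(const char *in4096, char *out4096)
{
	char cb[NTFS_SB_SIZE + 4];	/* 4100 bytes, per the compressor note */
	unsigned int sz;

	sz = ntfs_compress_block(in4096, NTFS_SB_SIZE, cb);
	if (!sz)
		return -1;
	/* Decompress the lone sub-block straight back out. */
	return ntfs_decompress((u8*)out4096, NTFS_SB_SIZE, (u8*)cb, sz);
}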

/**
 * ntfs_is_cb_compressed - internal function, do not use
 *
 * This is a very specialised function determining if a cb is compressed or
 * uncompressed.  It is assumed that checking for a sparse cb has already
 * been performed and that the cb is not sparse.  It makes all sorts of other
 * assumptions as well and hence it is not useful anywhere other than where
 * it is used at the moment.  Please, do not make this function available
 * for use outside of compress.c as it is bound to confuse people and not do
 * what they want.
 *
 * Return TRUE on errors so that the error will be detected later on in the
 * code.  Might be a bit confusing to debug but there really should never be
 * errors coming from here.
 */
static BOOL ntfs_is_cb_compressed(ntfs_attr *na, runlist_element *rl,
			VCN cb_start_vcn, int cb_clusters)
{
	/*
	 * The simplest case: the run starting at @cb_start_vcn contains
	 * @cb_clusters clusters which are all not sparse, thus the cb is not
	 * compressed.
	 */
restart:
	cb_clusters -= rl->length - (cb_start_vcn - rl->vcn);
	while (cb_clusters > 0) {
		/* Go to the next run. */
		rl++;
		/* Map the next runlist fragment if it is not mapped. */
		if (rl->lcn < LCN_HOLE || !rl->length) {
			cb_start_vcn = rl->vcn;
			rl = ntfs_attr_find_vcn(na, rl->vcn);
			if (!rl || rl->lcn < LCN_HOLE || !rl->length)
				return TRUE;
			/*
			 * If the runs were merged we need to deal with the
			 * resulting partial run, so simply restart.
			 */
			if (rl->vcn < cb_start_vcn)
				goto restart;
		}
		/* If the current run is sparse, the cb is compressed. */
		if (rl->lcn == LCN_HOLE)
			return TRUE;
		/* If the whole cb is not sparse, it is not compressed. */
		if (rl->length >= cb_clusters)
			return FALSE;
		cb_clusters -= rl->length;
	}
	/* None of the cb_clusters were sparse, thus the cb is not
	 * compressed. */
	return FALSE;
}

/**
 * ntfs_compressed_attr_pread - read from a compressed attribute
 * @na:		ntfs attribute to read from
 * @pos:	byte position in the attribute to begin reading from
 * @count:	number of bytes to read
 * @b:		output data buffer
 *
 * NOTE: You probably want to be using attrib.c::ntfs_attr_pread() instead.
 *
 * This function will read @count bytes starting at offset @pos from the
 * compressed ntfs attribute @na into the data buffer @b.
 *
 * On success, return the number of successfully read bytes.  If this number
 * is lower than @count this means that the read reached end of file or that
 * an error was encountered during the read so that the read is partial.
 * 0 means end of file or nothing was read (also return 0 when @count is 0).
 *
 * On error and nothing has been read, return -1 with errno set appropriately
 * to the return code of ntfs_pread(), or to EINVAL in case of invalid
 * arguments.
 */
s64 ntfs_compressed_attr_pread(ntfs_attr *na, s64 pos, s64 count, void *b)
{
	s64 br, to_read, ofs, total, total2;
	u64 cb_size_mask;
	VCN start_vcn, vcn, end_vcn;
	ntfs_volume *vol;
	runlist_element *rl;
	u8 *dest, *cb, *cb_pos, *cb_end;
	u32 cb_size;
	int err;
	ATTR_FLAGS data_flags;
	FILE_ATTR_FLAGS compression;
	unsigned int nr_cbs, cb_clusters;

	/* Validate the arguments before dereferencing @na anywhere. */
	if (!na || !na->ni || !na->ni->vol || !b || pos < 0 || count < 0) {
		errno = EINVAL;
		return -1;
	}
	ntfs_log_trace("Entering for inode 0x%llx, attr 0x%x, pos 0x%llx, count 0x%llx.\n",
			(unsigned long long)na->ni->mft_no, le32_to_cpu(na->type),
			(long long)pos, (long long)count);
	data_flags = na->data_flags;
	compression = na->ni->flags & FILE_ATTR_COMPRESSED;
	if ((data_flags & ATTR_COMPRESSION_MASK) != ATTR_IS_COMPRESSED) {
		errno = EINVAL;
		return -1;
	}
	/*
	 * Encrypted attributes are not supported.  We return access denied,
	 * which is what Windows NT4 does, too.
	 */
	if (NAttrEncrypted(na)) {
		errno = EACCES;
		return -1;
	}
	if (!count)
		return 0;
	/* Truncate reads beyond end of attribute. */
	if (pos + count > na->data_size) {
		if (pos >= na->data_size) {
			return 0;
		}
		count = na->data_size - pos;
	}
	/* If it is a resident attribute, simply use ntfs_attr_pread(). */
	if (!NAttrNonResident(na))
		return ntfs_attr_pread(na, pos, count, b);
	total = total2 = 0;
	/* Zero out reads beyond initialized size. */
	if (pos + count > na->initialized_size) {
		if (pos >= na->initialized_size) {
			memset(b, 0, count);
			return count;
		}
		total2 = pos + count - na->initialized_size;
		count -= total2;
		memset((u8*)b + count, 0, total2);
	}
	vol = na->ni->vol;
	cb_size = na->compression_block_size;
	cb_size_mask = cb_size - 1UL;
	cb_clusters = na->compression_block_clusters;

	/* Need a temporary buffer for each loaded compression block. */
	cb = (u8*)ntfs_malloc(cb_size);
	if (!cb)
		return -1;

	/* Need a temporary buffer for each uncompressed block. */
	dest = (u8*)ntfs_malloc(cb_size);
	if (!dest) {
		free(cb);
		return -1;
	}
	/*
	 * The first vcn in the first compression block (cb) which we need to
	 * decompress.
	 */
	start_vcn = (pos & ~cb_size_mask) >> vol->cluster_size_bits;
	/* Offset in the uncompressed cb at which to start reading data. */
	ofs = pos & cb_size_mask;
	/*
	 * The first vcn in the cb after the last cb which we need to
	 * decompress.
	 */
	end_vcn = ((pos + count + cb_size - 1) & ~cb_size_mask) >>
			vol->cluster_size_bits;
	/* Number of compression blocks (cbs) in the wanted vcn range. */
	nr_cbs = (end_vcn - start_vcn) << vol->cluster_size_bits >>
			na->compression_block_size_bits;
	cb_end = cb + cb_size;
do_next_cb:
	nr_cbs--;
	cb_pos = cb;
	vcn = start_vcn;
	start_vcn += cb_clusters;

	/* Check whether the compression block is sparse. */
	rl = ntfs_attr_find_vcn(na, vcn);
	if (!rl || rl->lcn < LCN_HOLE) {
		free(cb);
		free(dest);
		if (total)
			return total;
		/* FIXME: Do we want EIO or the error code? (AIA) */
		errno = EIO;
		return -1;
	}
	if (rl->lcn == LCN_HOLE) {
		/* Sparse cb, zero out destination range overlapping the cb. */
		ntfs_log_debug("Found sparse compression block.\n");
		to_read = min(count, cb_size - ofs);
		memset(b, 0, to_read);
		ofs = 0;
		total += to_read;
		count -= to_read;
		b = (u8*)b + to_read;
	} else if (!ntfs_is_cb_compressed(na, rl, vcn, cb_clusters)) {
		s64 tdata_size, tinitialized_size;
		/*
		 * Uncompressed cb, read it straight into the destination range
		 * overlapping the cb.
		 */
		ntfs_log_debug("Found uncompressed compression block.\n");
		/*
		 * Read the uncompressed data into the destination buffer.
		 * NOTE: We cheat a little bit here by marking the attribute as
		 * not compressed in the ntfs_attr structure so that we can
		 * read the data by simply using ntfs_attr_pread().  (-8
		 * NOTE: we have to modify data_size and initialized_size
		 * temporarily as well...
		 */
		to_read = min(count, cb_size - ofs);
		ofs += vcn << vol->cluster_size_bits;
		NAttrClearCompressed(na);
		na->data_flags &= ~ATTR_COMPRESSION_MASK;
		tdata_size = na->data_size;
		tinitialized_size = na->initialized_size;
		na->data_size = na->initialized_size = na->allocated_size;
		do {
			br = ntfs_attr_pread(na, ofs, to_read, b);
			if (br <= 0) {
				if (!br) {
					ntfs_log_error("Failed to read an"
							" uncompressed cluster,"
							" inode %lld offs 0x%llx\n",
							(long long)na->ni->mft_no,
							(long long)ofs);
					errno = EIO;
				}
				err = errno;
				na->data_size = tdata_size;
				na->initialized_size = tinitialized_size;
				na->ni->flags |= compression;
				na->data_flags = data_flags;
				free(cb);
				free(dest);
				if (total)
					return total;
				errno = err;
				return br;
			}
			total += br;
			count -= br;
			b = (u8*)b + br;
			to_read -= br;
			ofs += br;
		} while (to_read > 0);
		na->data_size = tdata_size;
		na->initialized_size = tinitialized_size;
		na->ni->flags |= compression;
		na->data_flags = data_flags;
		ofs = 0;
	} else {
		s64 tdata_size, tinitialized_size;
		u32 decompsz;

		/*
		 * Compressed cb, decompress it into the temporary buffer, then
		 * copy the data to the destination range overlapping the cb.
		 */
		ntfs_log_debug("Found compressed compression block.\n");
		/*
		 * Read the compressed data into the temporary buffer.
		 * NOTE: We cheat a little bit here by marking the attribute as
		 * not compressed in the ntfs_attr structure so that we can
		 * read the raw, compressed data by simply using
		 * ntfs_attr_pread().  (-8
		 * NOTE: We have to modify data_size and initialized_size
		 * temporarily as well...
		 */
		to_read = cb_size;
		NAttrClearCompressed(na);
		na->data_flags &= ~ATTR_COMPRESSION_MASK;
		tdata_size = na->data_size;
		tinitialized_size = na->initialized_size;
		na->data_size = na->initialized_size = na->allocated_size;
		do {
			br = ntfs_attr_pread(na,
					(vcn << vol->cluster_size_bits) +
					(cb_pos - cb), to_read, cb_pos);
			if (br <= 0) {
				if (!br) {
					ntfs_log_error("Failed to read a"
							" compressed cluster,"
							" inode %lld offs 0x%llx\n",
							(long long)na->ni->mft_no,
							(long long)(vcn << vol->cluster_size_bits));
					errno = EIO;
				}
				err = errno;
				na->data_size = tdata_size;
				na->initialized_size = tinitialized_size;
				na->ni->flags |= compression;
				na->data_flags = data_flags;
				free(cb);
				free(dest);
				if (total)
					return total;
				errno = err;
				return br;
			}
			cb_pos += br;
			to_read -= br;
		} while (to_read > 0);
		na->data_size = tdata_size;
		na->initialized_size = tinitialized_size;
		na->ni->flags |= compression;
		na->data_flags = data_flags;
		/* Just a precaution. */
		if (cb_pos + 2 <= cb_end)
			*(u16*)cb_pos = 0;
		ntfs_log_debug("Successfully read the compression block.\n");
		/* Do not decompress beyond the requested block. */
		to_read = min(count, cb_size - ofs);
		decompsz = ((ofs + to_read - 1) | (NTFS_SB_SIZE - 1)) + 1;
		if (ntfs_decompress(dest, decompsz, cb, cb_size) < 0) {
			err = errno;
			free(cb);
			free(dest);
			if (total)
				return total;
			errno = err;
			return -1;
		}
		memcpy(b, dest + ofs, to_read);
		total += to_read;
		count -= to_read;
		b = (u8*)b + to_read;
		ofs = 0;
	}
	/* Do we have more work to do? */
	if (nr_cbs)
		goto do_next_cb;
	/* We no longer need the buffers. */
	free(cb);
	free(dest);
	/* Return the number of bytes read. */
	return total + total2;
}

/*
 * Read data from a set of clusters
 *
 * Returns the amount of data read
 */

static u32 read_clusters(ntfs_volume *vol, const runlist_element *rl,
			s64 offs, u32 to_read, char *inbuf)
{
	u32 count;
	int xgot;
	u32 got;
	s64 xpos;
	BOOL first;
	char *xinbuf;
	const runlist_element *xrl;

	got = 0;
	xrl = rl;
	xinbuf = inbuf;
	first = TRUE;
	do {
		count = xrl->length << vol->cluster_size_bits;
		xpos = xrl->lcn << vol->cluster_size_bits;
		if (first) {
			count -= offs;
			xpos += offs;
		}
		if ((to_read - got) < count)
			count = to_read - got;
		xgot = ntfs_pread(vol->dev, xpos, count, xinbuf);
		if (xgot == (int)count) {
			got += count;
			xpos += count;
			xinbuf += count;
			xrl++;
		}
		first = FALSE;
	} while ((xgot == (int)count) && (got < to_read));
	return (got);
}

/*
 * Write data to a set of clusters
 *
 * Returns the amount of data written
 */

static s32 write_clusters(ntfs_volume *vol, const runlist_element *rl,
			s64 offs, s32 to_write, const char *outbuf)
{
	s32 count;
	s32 put, xput;
	s64 xpos;
	BOOL first;
	const char *xoutbuf;
	const runlist_element *xrl;

	put = 0;
	xrl = rl;
	xoutbuf = outbuf;
	first = TRUE;
	do {
		count = xrl->length << vol->cluster_size_bits;
		xpos = xrl->lcn << vol->cluster_size_bits;
		if (first) {
			count -= offs;
			xpos += offs;
		}
		if ((to_write - put) < count)
			count = to_write - put;
		xput = ntfs_pwrite(vol->dev, xpos, count, xoutbuf);
		if (xput == count) {
			put += count;
			xpos += count;
			xoutbuf += count;
			xrl++;
		}
		first = FALSE;
	} while ((xput == count) && (put < to_write));
	return (put);
}

/*
 * Compress and write a set of blocks
 *
 * Returns the size actually written (rounded to a full cluster),
 * or 0 if all zeroes (nothing is written),
 * or -1 if it could not compress (nothing is written),
 * or -2 if there was an irrecoverable error (errno set)
 */

static s32 ntfs_comp_set(ntfs_attr *na, runlist_element *rl,
			s64 offs, u32 insz, const char *inbuf)
{
	ntfs_volume *vol;
	char *outbuf;
	char *pbuf;
	u32 compsz;
	s32 written;
	s32 rounded;
	unsigned int clsz;
	u32 p;
	unsigned int sz;
	unsigned int bsz;
	BOOL fail;
	BOOL allzeroes;
	/* a single compressed zero */
	static char onezero[] = { 0x01, 0xb0, 0x00, 0x00 };
	/* a couple of compressed zeroes */
	static char twozeroes[] = { 0x02, 0xb0, 0x00, 0x00, 0x00 };
	/* more compressed zeroes, to be followed by some count */
	static char morezeroes[] = { 0x03, 0xb0, 0x02, 0x00 };
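	/*
	 * Illustrative note (not in the original): each pattern above is a
	 * complete sub-block in the format produced by ntfs_compress_block().
	 * The first two bytes are the little-endian header: 0xb0 in the high
	 * byte marks a compressed sub-block and the low bits give the
	 * sub-block size minus 3, header included.  So "onezero" is header
	 * 0xb001 (4 bytes total) + tag 0x00 + one literal zero: a single
	 * zero byte in compressed form.
	 */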

	vol = na->ni->vol;
	written = -1; /* default return */
	clsz = 1 << vol->cluster_size_bits;
	/* may need 2 extra bytes per block and 2 more bytes */
	outbuf = (char*)ntfs_malloc(na->compression_block_size
			+ 2*(na->compression_block_size/NTFS_SB_SIZE)
			+ 2);
	if (outbuf) {
		fail = FALSE;
		compsz = 0;
		allzeroes = TRUE;
		for (p=0; (p<insz) && !fail; p+=NTFS_SB_SIZE) {
			if ((p + NTFS_SB_SIZE) < insz)
				bsz = NTFS_SB_SIZE;
			else
				bsz = insz - p;
			pbuf = &outbuf[compsz];
			sz = ntfs_compress_block(&inbuf[p],bsz,pbuf);
			/* fail if all the clusters (or more) are needed */
			if (!sz || ((compsz + sz + clsz + 2)
					> na->compression_block_size))
				fail = TRUE;
			else {
				if (allzeroes) {
					/* check whether this is all zeroes */
					switch (sz) {
					case 4 :
						allzeroes = !memcmp(
							pbuf,onezero,4);
						break;
					case 5 :
						allzeroes = !memcmp(
							pbuf,twozeroes,5);
						break;
					case 6 :
						allzeroes = !memcmp(
							pbuf,morezeroes,4);
						break;
					default :
						allzeroes = FALSE;
						break;
					}
				}
				compsz += sz;
			}
		}
		if (!fail && !allzeroes) {
			/* add a couple of null bytes, space has been checked */
			outbuf[compsz++] = 0;
			outbuf[compsz++] = 0;
			/* write a full cluster, to avoid partial reading */
			rounded = ((compsz - 1) | (clsz - 1)) + 1;
			memset(&outbuf[compsz], 0, rounded - compsz);
			written = write_clusters(vol, rl, offs, rounded, outbuf);
			if (written != rounded) {
				/*
				 * TODO : previously written data has been
				 * spoilt, should return a specific error
				 */
				ntfs_log_error("error writing compressed data\n");
				errno = EIO;
				written = -2;
			}
		} else
			if (!fail)
				written = 0;
		free(outbuf);
	}
	return (written);
}

/*
 * Check the validity of a compressed runlist
 * The check starts at the beginning of the current run and ends
 * at the end of the runlist
 * errno is set if the runlist is not valid
 */

static BOOL valid_compressed_run(ntfs_attr *na, runlist_element *rl,
			BOOL fullcheck, const char *text)
{
	runlist_element *xrl;
	const char *err;
	BOOL ok = TRUE;

	xrl = rl;
	while (xrl->vcn & (na->compression_block_clusters - 1))
		xrl--;
	err = (const char*)NULL;
	while (xrl->length) {
		if ((xrl->vcn + xrl->length) != xrl[1].vcn)
			err = "Runs not adjacent";
		if (xrl->lcn == LCN_HOLE) {
			if ((xrl->vcn + xrl->length)
			    & (na->compression_block_clusters - 1)) {
				err = "Invalid hole";
			}
			if (fullcheck && (xrl[1].lcn == LCN_HOLE)) {
				err = "Adjacent holes";
			}
		}
		if (err) {
			ntfs_log_error("%s at %s index %ld inode %lld\n",
				err, text, (long)(xrl - na->rl),
				(long long)na->ni->mft_no);
			errno = EIO;
			ok = FALSE;
			err = (const char*)NULL;
		}
		xrl++;
	}
	return (ok);
}

/*
 * Free unneeded clusters after overwriting compressed data
 *
 * This generally requires one or two empty slots at the end of the runlist,
 * but we do not want to reallocate the runlist here because
 * there are many pointers to it.
 * So the empty slots have to be reserved beforehand.
 *
 * Returns zero unless some error occurred (described by errno)
 *
 *         +======= start of block =====+
 *       0 |A chunk may overflow        | <-- rl         usedcnt : A + B
 *         |A on previous block         |                then B
 *         |A                           |
 *         +-- end of allocated chunk --+                freelength : C
 *         |B                           |                (incl overflow)
 *         +== end of compressed data ==+
 *         |C                           | <-- freerl     freecnt : C + D
 *         |C chunk may overflow        |
 *         |C on next block             |
 *         +-- end of allocated chunk --+
 *         |D                           |
 *         |D chunk may overflow        |
 *      15 |D on next block             |
 *         +======== end of block ======+
 *
 */

static int ntfs_compress_overwr_free(ntfs_attr *na, runlist_element *rl,
			s32 usedcnt, s32 freecnt, VCN *update_from)
{
	BOOL beginhole;
	BOOL mergeholes;
	s32 oldlength;
	s32 freelength;
	s64 freelcn;
	s64 freevcn;
	runlist_element *freerl;
	ntfs_volume *vol;
	s32 carry;
	int res;

	vol = na->ni->vol;
	res = 0;
	freelcn = rl->lcn + usedcnt;
	freevcn = rl->vcn + usedcnt;
	freelength = rl->length - usedcnt;
	beginhole = !usedcnt && !rl->vcn;
	/* can merge with hole before ? */
	mergeholes = !usedcnt
			&& rl[0].vcn
			&& (rl[-1].lcn == LCN_HOLE);
	/* truncate current run, carry to subsequent hole */
	carry = freelength;
	oldlength = rl->length;
	if (mergeholes) {
		/* merging with a hole before */
		freerl = rl;
	} else {
		rl->length -= freelength; /* warning : can be zero */
		freerl = ++rl;
	}
	if (!mergeholes && (usedcnt || beginhole)) {
		s32 freed;
		runlist_element *frl;
		runlist_element *erl;
		int holes = 0;
		BOOL threeparts;

		/* free the unneeded clusters from initial run, then freerl */
		threeparts = (freelength > freecnt);
		freed = 0;
		frl = freerl;
		if (freelength) {
			res = ntfs_cluster_free_basic(vol,freelcn,
				(threeparts ? freecnt : freelength));
			if (!res)
				freed += (threeparts ? freecnt : freelength);
			if (!usedcnt) {
				holes++;
				freerl--;
				freerl->length += (threeparts
						? freecnt : freelength);
				if (freerl->vcn < *update_from)
					*update_from = freerl->vcn;
			}
		}
		while (!res && frl->length && (freed < freecnt)) {
			if (frl->length <= (freecnt - freed)) {
				res = ntfs_cluster_free_basic(vol, frl->lcn,
						frl->length);
				if (!res) {
					freed += frl->length;
					frl->lcn = LCN_HOLE;
					frl->length += carry;
					carry = 0;
					holes++;
				}
			} else {
				res = ntfs_cluster_free_basic(vol, frl->lcn,
						freecnt - freed);
				if (!res) {
					frl->lcn += freecnt - freed;
					frl->vcn += freecnt - freed;
					frl->length -= freecnt - freed;
					freed = freecnt;
				}
			}
			frl++;
		}
		na->compressed_size -= freed << vol->cluster_size_bits;
		switch (holes) {
		case 0 :
			/* there is no hole, we must insert one */
			/* space for the hole has been prereserved */
			if (freerl->lcn == LCN_HOLE) {
				if (threeparts) {
					erl = freerl;
					while (erl->length)
						erl++;
					do {
						erl[2] = *erl;
					} while (erl-- != freerl);

					freerl[1].length = freelength - freecnt;
					freerl->length = freecnt;
					freerl[1].lcn = freelcn + freecnt;
					freerl[1].vcn = freevcn + freecnt;
					freerl[2].lcn = LCN_HOLE;
					freerl[2].vcn = freerl[1].vcn
							+ freerl[1].length;
					freerl->vcn = freevcn;
				} else {
					freerl->vcn = freevcn;
					freerl->length += freelength;
				}
			} else {
				erl = freerl;
				while (erl->length)
					erl++;
				if (threeparts) {
					do {
						erl[2] = *erl;
					} while (erl-- != freerl);
					freerl[1].lcn = freelcn + freecnt;
					freerl[1].vcn = freevcn + freecnt;
					freerl[1].length = oldlength - usedcnt - freecnt;
				} else {
					do {
						erl[1] = *erl;
					} while (erl-- != freerl);
				}
				freerl->lcn = LCN_HOLE;
				freerl->vcn = freevcn;
				freerl->length = freecnt;
			}
			break;
		case 1 :
			/* there is a single hole, we may have to merge */
			freerl->vcn = freevcn;
			freerl->length = freecnt;
			if (freerl[1].lcn == LCN_HOLE) {
				freerl->length += freerl[1].length;
				erl = freerl;
				do {
					erl++;
					*erl = erl[1];
				} while (erl->length);
			}
			break;
		default :
			/* there were several holes, we must merge them */
			freerl->lcn = LCN_HOLE;
			freerl->vcn = freevcn;
			freerl->length = freecnt;
			if (freerl[holes].lcn == LCN_HOLE) {
				freerl->length += freerl[holes].length;
				holes++;
			}
			erl = freerl;
			do {
				erl++;
				*erl = erl[holes - 1];
			} while (erl->length);
			break;
		}
	} else {
		s32 freed;
		runlist_element *frl;
		runlist_element *xrl;

		freed = 0;
		frl = freerl--;
		if (freerl->vcn < *update_from)
			*update_from = freerl->vcn;
		while (!res && frl->length && (freed < freecnt)) {
			if (frl->length <= (freecnt - freed)) {
				freerl->length += frl->length;
				freed += frl->length;
				res = ntfs_cluster_free_basic(vol, frl->lcn,
						frl->length);
				frl++;
			} else {
				freerl->length += freecnt - freed;
				res = ntfs_cluster_free_basic(vol, frl->lcn,
						freecnt - freed);
				frl->lcn += freecnt - freed;
				frl->vcn += freecnt - freed;
				frl->length -= freecnt - freed;
				freed = freecnt;
			}
		}
		/* remove unneeded runlist entries */
		xrl = freerl;
		/* group with next run if also a hole */
		if (frl->length && (frl->lcn == LCN_HOLE)) {
			xrl->length += frl->length;
			frl++;
		}
		while (frl->length) {
			*++xrl = *frl++;
		}
		*++xrl = *frl; /* terminator */
		na->compressed_size -= freed << vol->cluster_size_bits;
	}
	return (res);
}
| 1424 | |
| 1425 | /* |
| 1426 | * Free unneeded clusters after compression |
| 1427 | * |
| 1428 | * This generally requires one or two empty slots at the end of runlist, |
| 1429 | * but we do not want to reallocate the runlist here because |
| 1430 | * there are many pointers to it. |
| 1431 | * So the empty slots have to be reserved beforehand |
| 1432 | * |
| 1433 | * Returns zero unless some error occurred (described by errno) |
| 1434 | */ |
| 1435 | |
| 1436 | static int ntfs_compress_free(ntfs_attr *na, runlist_element *rl, |
| 1437 | s64 used, s64 reserved, BOOL appending, |
| 1438 | VCN *update_from) |
| 1439 | { |
| 1440 | s32 freecnt; |
| 1441 | s32 usedcnt; |
| 1442 | int res; |
| 1443 | s64 freelcn; |
| 1444 | s64 freevcn; |
| 1445 | s32 freelength; |
| 1446 | BOOL mergeholes; |
| 1447 | BOOL beginhole; |
| 1448 | ntfs_volume *vol; |
| 1449 | runlist_element *freerl; |
| 1450 | |
| 1451 | res = -1; /* default return */ |
| 1452 | vol = na->ni->vol; |
| 1453 | freecnt = (reserved - used) >> vol->cluster_size_bits; |
| 1454 | usedcnt = (reserved >> vol->cluster_size_bits) - freecnt; |
| 1455 | if (rl->vcn < *update_from) |
| 1456 | *update_from = rl->vcn; |
| 1457 | /* skip entries fully used, if any */ |
| 1458 | while (rl->length && (rl->length < usedcnt)) { |
| 1459 | usedcnt -= rl->length; /* must be > 0 */ |
| 1460 | rl++; |
| 1461 | } |
| 1462 | if (rl->length) { |
| 1463 | /* |
| 1464 | * Splitting the current allocation block requires |
| 1465 | * an extra runlist element to create the hole. |
| 1466 | * The required entry has been prereserved when |
| 1467 | * mapping the runlist. |
| 1468 | */ |
| 1469 | /* get the free part in initial run */ |
| 1470 | freelcn = rl->lcn + usedcnt; |
| 1471 | freevcn = rl->vcn + usedcnt; |
| 1472 | /* check that the free part ends on a compression block boundary */ |
| 1473 | if (!((freevcn + freecnt) |
| 1474 | & (na->compression_block_clusters - 1))) { |
| 1475 | if (!appending) |
| 1476 | res = ntfs_compress_overwr_free(na,rl, |
| 1477 | usedcnt,freecnt,update_from); |
| 1478 | else { |
| 1479 | freelength = rl->length - usedcnt; |
| 1480 | beginhole = !usedcnt && !rl->vcn; |
| 1481 | mergeholes = !usedcnt |
| 1482 | && rl[0].vcn |
| 1483 | && (rl[-1].lcn == LCN_HOLE); |
| 1484 | if (mergeholes) { |
| 1485 | s32 carry; |
| 1486 | |
| 1487 | /* shorten the runs which have free space */ |
| 1488 | carry = freecnt; |
| 1489 | freerl = rl; |
| 1490 | while (freerl->length < carry) { |
| 1491 | carry -= freerl->length; |
| 1492 | freerl++; |
| 1493 | } |
| 1494 | freerl->length = carry; |
| 1495 | freerl = rl; |
| 1496 | } else { |
| 1497 | rl->length = usedcnt; /* can be zero ? */ |
| 1498 | freerl = ++rl; |
| 1499 | } |
| 1500 | if ((freelength > 0) |
| 1501 | && !mergeholes |
| 1502 | && (usedcnt || beginhole)) { |
| 1503 | /* |
| 1504 | * move the unused part to the end. Doing so, |
| 1505 | * the vcns will be out of order. This does no |
| 1506 | * harm: the vcns are meaningless now, and |
| 1507 | * only the lcns are meaningful for freeing. |
| 1508 | */ |
| 1509 | /* locate current end */ |
| 1510 | while (rl->length) |
| 1511 | rl++; |
| 1512 | /* new terminator relocated */ |
| 1513 | rl[1].vcn = rl->vcn; |
| 1514 | rl[1].lcn = LCN_ENOENT; |
| 1515 | rl[1].length = 0; |
| 1516 | /* hole, currently allocated */ |
| 1517 | rl->vcn = freevcn; |
| 1518 | rl->lcn = freelcn; |
| 1519 | rl->length = freelength; |
| 1520 | } else { |
| 1521 | /* why is this different from the begin hole case ? */ |
| 1522 | if ((freelength > 0) |
| 1523 | && !mergeholes |
| 1524 | && !usedcnt) { |
| 1525 | freerl--; |
| 1526 | freerl->length = freelength; |
| 1527 | if (freerl->vcn < *update_from) |
| 1528 | *update_from |
| 1529 | = freerl->vcn; |
| 1530 | } |
| 1531 | } |
| 1532 | /* free the hole */ |
| 1533 | res = ntfs_cluster_free_from_rl(vol,freerl); |
| 1534 | if (!res) { |
| 1535 | na->compressed_size -= freecnt |
| 1536 | << vol->cluster_size_bits; |
| 1537 | if (mergeholes) { |
| 1538 | /* merge with adjacent hole */ |
| 1539 | freerl--; |
| 1540 | freerl->length += freecnt; |
| 1541 | } else { |
| 1542 | if (beginhole) |
| 1543 | freerl--; |
| 1544 | /* mark hole as free */ |
| 1545 | freerl->lcn = LCN_HOLE; |
| 1546 | freerl->vcn = freevcn; |
| 1547 | freerl->length = freecnt; |
| 1548 | } |
| 1549 | if (freerl->vcn < *update_from) |
| 1550 | *update_from = freerl->vcn; |
| 1551 | /* and set up the new end */ |
| 1552 | freerl[1].lcn = LCN_ENOENT; |
| 1553 | freerl[1].vcn = freevcn + freecnt; |
| 1554 | freerl[1].length = 0; |
| 1555 | } |
| 1556 | } |
| 1557 | } else { |
| 1558 | ntfs_log_error("Bad end of a compression block set\n"); |
| 1559 | errno = EIO; |
| 1560 | } |
| 1561 | } else { |
| 1562 | ntfs_log_error("No cluster to free after compression\n"); |
| 1563 | errno = EIO; |
| 1564 | } |
| 1565 | NAttrSetRunlistDirty(na); |
| 1566 | return (res); |
| 1567 | } |
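
/*
 * Worked example (added for illustration; assumes 4K clusters, i.e.
 * cluster_size_bits == 12, and 16-cluster compression blocks): with
 * reserved == 0x10000 (a full 16-cluster block) and used == 0x5400
 * (21504 bytes of compressed data),
 *	freecnt = (0x10000 - 0x5400) >> 12 = 10 clusters to free,
 *	usedcnt = (0x10000 >> 12) - 10    = 6 clusters kept,
 * which matches the six 4K clusters needed to hold 21504 bytes.
 */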
| 1568 | |
| 1569 | /* |
| 1570 | * Read the existing data, decompress it, and append the new buffer |
| 1571 | * Do nothing if anything fails |
| 1572 | */ |
| 1573 | |
| 1574 | static int ntfs_read_append(ntfs_attr *na, const runlist_element *rl, |
| 1575 | s64 offs, u32 compsz, s32 pos, BOOL appending, |
| 1576 | char *outbuf, s64 to_write, const void *b) |
| 1577 | { |
| 1578 | int fail = 1; |
| 1579 | char *compbuf; |
| 1580 | u32 decompsz; |
| 1581 | u32 got; |
| 1582 | |
| 1583 | if (compsz == na->compression_block_size) { |
| 1584 | /* if the full block was requested, it was a hole */ |
| 1585 | memset(outbuf,0,compsz); |
| 1586 | memcpy(&outbuf[pos],b,to_write); |
| 1587 | fail = 0; |
| 1588 | } else { |
| 1589 | compbuf = (char*)ntfs_malloc(compsz); |
| 1590 | if (compbuf) { |
| 1591 | /* must align to full sub-blocks for decompression */ |
| 1592 | if (appending) |
| 1593 | decompsz = ((pos - 1) | (NTFS_SB_SIZE - 1)) + 1; |
| 1594 | else |
| 1595 | decompsz = na->compression_block_size; |
| 1596 | got = read_clusters(na->ni->vol, rl, offs, |
| 1597 | compsz, compbuf); |
| 1598 | if ((got == compsz) |
| 1599 | && !ntfs_decompress((u8*)outbuf,decompsz, |
| 1600 | (u8*)compbuf,compsz)) { |
| 1601 | memcpy(&outbuf[pos],b,to_write); |
| 1602 | fail = 0; |
| 1603 | } |
| 1604 | free(compbuf); |
| 1605 | } |
| 1606 | } |
| 1607 | return (fail); |
| 1608 | } |
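
/*
 * The "((x - 1) | (m - 1)) + 1" expression above rounds x up to the
 * next multiple of m when m is a power of two and x > 0. A minimal
 * sketch (added for illustration; round_up() is not part of this
 * file):
 *
 *	static u32 round_up(u32 x, u32 m)	// m must be a power of 2
 *	{
 *		return (((x - 1) | (m - 1)) + 1);
 *	}
 *
 * For example, with pos == 0x1234 and NTFS_SB_SIZE == 0x1000, the
 * decompression size is rounded up to 0x2000 (two sub-blocks).
 */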
| 1609 | |
| 1610 | /* |
| 1611 | * Flush a full compression block |
| 1612 | * |
| 1613 | * returns the size actually written (rounded to a full cluster) |
| 1614 | * or 0 if the data could not be compressed (and was written uncompressed) |
| 1615 | * or -1 if there was an irrecoverable error (errno set) |
| 1616 | */ |
| 1617 | |
| 1618 | static s32 ntfs_flush(ntfs_attr *na, runlist_element *rl, s64 offs, |
| 1619 | const char *outbuf, s32 count, BOOL compress, |
| 1620 | BOOL appending, VCN *update_from) |
| 1621 | { |
| 1622 | s32 rounded; |
| 1623 | s32 written; |
| 1624 | int clsz; |
| 1625 | |
| 1626 | if (compress) { |
| 1627 | written = ntfs_comp_set(na, rl, offs, count, outbuf); |
| 1628 | if (written == -1) |
| 1629 | compress = FALSE; |
| 1630 | if ((written >= 0) |
| 1631 | && ntfs_compress_free(na,rl,offs + written, |
| 1632 | offs + na->compression_block_size, appending, |
| 1633 | update_from)) |
| 1634 | written = -1; |
| 1635 | } else |
| 1636 | written = 0; |
| 1637 | if (!compress) { |
| 1638 | clsz = 1 << na->ni->vol->cluster_size_bits; |
| 1639 | rounded = ((count - 1) | (clsz - 1)) + 1; |
| 1640 | written = write_clusters(na->ni->vol, rl, |
| 1641 | offs, rounded, outbuf); |
| 1642 | if (written != rounded) |
| 1643 | written = -1; |
| 1644 | } |
| 1645 | return (written); |
| 1646 | } |
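
/*
 * Hypothetical caller-side sketch (added for illustration) of the
 * three return cases documented in the header comment above:
 *
 *	s32 written = ntfs_flush(na, rl, offs, outbuf, count,
 *				 compress, appending, update_from);
 *	if (written < 0)
 *		// irrecoverable error, errno describes it
 *	else if (written == 0)
 *		// could not compress, data was written uncompressed
 *	else
 *		// compressed; size written, rounded to a full cluster
 */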
| 1647 | |
| 1648 | /* |
| 1649 | * Write some data to be compressed. |
| 1650 | * Compression only occurs when a compression block (usually 16 |
| 1651 | * clusters) is full. When this occurs an extra runlist slot may be |
| 1652 | * needed, so it has to be reserved beforehand. |
| 1653 | * |
| 1654 | * Returns the size of uncompressed data written, |
| 1655 | * or negative if an error occurred. |
| 1656 | * When the returned size is less than requested, new clusters have |
| 1657 | * to be allocated before the function is called again. |
| 1658 | */ |
| 1659 | |
| 1660 | s64 ntfs_compressed_pwrite(ntfs_attr *na, runlist_element *wrl, s64 wpos, |
| 1661 | s64 offs, s64 to_write, s64 rounded, |
| 1662 | const void *b, int compressed_part, |
| 1663 | VCN *update_from) |
| 1664 | { |
| 1665 | ntfs_volume *vol; |
| 1666 | runlist_element *brl; /* entry containing the beginning of block */ |
| 1667 | int compression_length; |
| 1668 | s64 written; |
| 1669 | s64 to_read; |
| 1670 | s64 to_flush; |
| 1671 | s64 roffs; |
| 1672 | s64 got; |
| 1673 | s64 start_vcn; |
| 1674 | s64 nextblock; |
| 1675 | s64 endwrite; |
| 1676 | u32 compsz; |
| 1677 | char *inbuf; |
| 1678 | char *outbuf; |
| 1679 | BOOL fail; |
| 1680 | BOOL done; |
| 1681 | BOOL compress; |
| 1682 | BOOL appending; |
| 1683 | |
| 1684 | if (!valid_compressed_run(na,wrl,FALSE,"begin compressed write")) { |
| 1685 | return (-1); |
| 1686 | } |
| 1687 | if ((*update_from < 0) |
| 1688 | || (compressed_part < 0) |
| 1689 | || (compressed_part > (int)na->compression_block_clusters)) { |
| 1690 | ntfs_log_error("Bad update vcn or compressed_part %d for compressed write\n", |
| 1691 | compressed_part); |
| 1692 | errno = EIO; |
| 1693 | return (-1); |
| 1694 | } |
| 1695 | /* make sure there are two unused entries in the runlist */ |
| 1696 | if (na->unused_runs < 2) { |
| 1697 | ntfs_log_error("No unused runs for compressed write\n"); |
| 1698 | errno = EIO; |
| 1699 | return (-1); |
| 1700 | } |
| 1701 | if (wrl->vcn < *update_from) |
| 1702 | *update_from = wrl->vcn; |
| 1703 | written = -1; /* default return */ |
| 1704 | vol = na->ni->vol; |
| 1705 | compression_length = na->compression_block_clusters; |
| 1706 | compress = FALSE; |
| 1707 | done = FALSE; |
| 1708 | /* |
| 1709 | * Cannot accept writing beyond the current compression set |
| 1710 | * because when compression occurs, clusters are freed |
| 1711 | * and have to be reallocated. |
| 1712 | * (cannot happen with standard fuse 4K buffers) |
| 1713 | * The caller has to avoid this situation, or face consequences. |
| 1714 | */ |
| 1715 | nextblock = ((offs + (wrl->vcn << vol->cluster_size_bits)) |
| 1716 | | (na->compression_block_size - 1)) + 1; |
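	/*
	 * Worked example (added for illustration; assumes 4K clusters and
	 * a 0x10000-byte compression block): if the write starts at byte
	 * 0x12345 of the attribute, nextblock is (0x12345 | 0xFFFF) + 1
	 * == 0x20000, i.e. the start of the following compression block.
	 */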
| 1717 | /* determine whether we are appending to the file */ |
| 1718 | endwrite = offs + to_write + (wrl->vcn << vol->cluster_size_bits); |
| 1719 | appending = endwrite >= na->initialized_size; |
| 1720 | if (endwrite >= nextblock) { |
| 1721 | /* it is time to compress */ |
| 1722 | compress = TRUE; |
| 1723 | /* only process what we can */ |
| 1724 | to_write = rounded = nextblock |
| 1725 | - (offs + (wrl->vcn << vol->cluster_size_bits)); |
| 1726 | } |
| 1727 | start_vcn = 0; |
| 1728 | fail = FALSE; |
| 1729 | brl = wrl; |
| 1730 | roffs = 0; |
| 1731 | /* |
| 1732 | * If we are about to compress or we need to decompress |
| 1733 | * existing data, we have to process a full set of blocks. |
| 1734 | * So relocate the parameters to the beginning of allocation |
| 1735 | * containing the first byte of the set of blocks. |
| 1736 | */ |
| 1737 | if (compress || compressed_part) { |
| 1738 | /* find the beginning of block */ |
| 1739 | start_vcn = (wrl->vcn + (offs >> vol->cluster_size_bits)) |
| 1740 | & -compression_length; |
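		/*
		 * Note (added for illustration): with compression_length
		 * == 16, "& -compression_length" is "& ~15" in two's
		 * complement, rounding the vcn down to the first cluster
		 * of its compression block set.
		 */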
| 1741 | if (start_vcn < *update_from) |
| 1742 | *update_from = start_vcn; |
| 1743 | while (brl->vcn && (brl->vcn > start_vcn)) { |
| 1744 | /* jumping back over a hole means big trouble */ |
| 1745 | if (brl->lcn == (LCN)LCN_HOLE) { |
| 1746 | ntfs_log_error("jump back over a hole when appending\n"); |
| 1747 | fail = TRUE; |
| 1748 | errno = EIO; |
| 1749 | } |
| 1750 | brl--; |
| 1751 | offs += brl->length << vol->cluster_size_bits; |
| 1752 | } |
| 1753 | roffs = (start_vcn - brl->vcn) << vol->cluster_size_bits; |
| 1754 | } |
| 1755 | if (compressed_part && !fail) { |
| 1756 | /* |
| 1757 | * The set of compression blocks contains compressed data |
| 1758 | * (we are reopening an existing file to append to it). |
| 1759 | * Decompress the existing data and append the new data. |
| 1760 | */ |
| 1761 | compsz = (s32)compressed_part << vol->cluster_size_bits; |
| 1762 | outbuf = (char*)ntfs_malloc(na->compression_block_size); |
| 1763 | if (outbuf) { |
| 1764 | if (appending) { |
| 1765 | to_read = offs - roffs; |
| 1766 | to_flush = to_read + to_write; |
| 1767 | } else { |
| 1768 | to_read = na->data_size |
| 1769 | - (brl->vcn << vol->cluster_size_bits); |
| 1770 | if (to_read > na->compression_block_size) |
| 1771 | to_read = na->compression_block_size; |
| 1772 | to_flush = to_read; |
| 1773 | } |
| 1774 | if (!ntfs_read_append(na, brl, roffs, compsz, |
| 1775 | (s32)(offs - roffs), appending, |
| 1776 | outbuf, to_write, b)) { |
| 1777 | written = ntfs_flush(na, brl, roffs, |
| 1778 | outbuf, to_flush, compress, appending, |
| 1779 | update_from); |
| 1780 | if (written >= 0) { |
| 1781 | written = to_write; |
| 1782 | done = TRUE; |
| 1783 | } |
| 1784 | } |
| 1785 | free(outbuf); |
| 1786 | } |
| 1787 | } else { |
| 1788 | if (compress && !fail) { |
| 1789 | /* |
| 1790 | * we are filling up a compression block: read the |
| 1791 | * full set of blocks and compress it |
| 1792 | */ |
| 1793 | inbuf = (char*)ntfs_malloc(na->compression_block_size); |
| 1794 | if (inbuf) { |
| 1795 | to_read = offs - roffs; |
| 1796 | if (to_read) |
| 1797 | got = read_clusters(vol, brl, roffs, |
| 1798 | to_read, inbuf); |
| 1799 | else |
| 1800 | got = 0; |
| 1801 | if (got == to_read) { |
| 1802 | memcpy(&inbuf[to_read],b,to_write); |
| 1803 | written = ntfs_comp_set(na, brl, roffs, |
| 1804 | to_read + to_write, inbuf); |
| 1805 | /* |
| 1806 | * if compression was not successful, |
| 1807 | * only write the part which was requested |
| 1808 | */ |
| 1809 | if ((written >= 0) |
| 1810 | /* free the unused clusters */ |
| 1811 | && !ntfs_compress_free(na,brl, |
| 1812 | written + roffs, |
| 1813 | na->compression_block_size |
| 1814 | + roffs, |
| 1815 | appending, update_from)) { |
| 1816 | done = TRUE; |
| 1817 | written = to_write; |
| 1818 | } |
| 1819 | } |
| 1820 | free(inbuf); |
| 1821 | } |
| 1822 | } |
| 1823 | if (!done) { |
| 1824 | /* |
| 1825 | * if the compression block is not full, or |
| 1826 | * if compression failed for whatever reason, |
| 1827 | * write uncompressed |
| 1828 | */ |
| 1829 | /* check we are not overflowing current allocation */ |
| 1830 | if ((wpos + rounded) |
| 1831 | > ((wrl->lcn + wrl->length) |
| 1832 | << vol->cluster_size_bits)) { |
| 1833 | ntfs_log_error("writing on unallocated clusters\n"); |
| 1834 | errno = EIO; |
| 1835 | } else { |
| 1836 | written = ntfs_pwrite(vol->dev, wpos, |
| 1837 | rounded, b); |
| 1838 | if (written == rounded) |
| 1839 | written = to_write; |
| 1840 | } |
| 1841 | } |
| 1842 | } |
| 1843 | if ((written >= 0) |
| 1844 | && !valid_compressed_run(na,wrl,TRUE,"end compressed write")) |
| 1845 | written = -1; |
| 1846 | return (written); |
| 1847 | } |
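
/*
 * Hypothetical caller-side sketch (added for illustration; not part of
 * the original code) of the contract documented above: when less than
 * the requested size is accepted, the caller must allocate new
 * clusters before retrying with the remaining data.
 *
 *	s64 done = ntfs_compressed_pwrite(na, wrl, wpos, offs, to_write,
 *					  rounded, b, compressed_part,
 *					  update_from);
 *	if (done < 0)
 *		// fail, errno describes the error
 *	else if (done < to_write)
 *		// allocate new clusters, then write the rest of b
 */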
| 1848 | |
| 1849 | /* |
| 1850 | * Close a file written compressed. |
| 1851 | * This compresses the last partial compression block of the file. |
| 1852 | * Two empty runlist slots have to be reserved beforehand. |
| 1853 | * |
| 1854 | * Returns zero if closing is successful. |
| 1855 | */ |
| 1856 | |
| 1857 | int ntfs_compressed_close(ntfs_attr *na, runlist_element *wrl, s64 offs, |
| 1858 | VCN *update_from) |
| 1859 | { |
| 1860 | ntfs_volume *vol; |
| 1861 | runlist_element *brl; /* entry containing the beginning of block */ |
| 1862 | int compression_length; |
| 1863 | s64 written; |
| 1864 | s64 to_read; |
| 1865 | s64 roffs; |
| 1866 | s64 got; |
| 1867 | s64 start_vcn; |
| 1868 | char *inbuf; |
| 1869 | BOOL fail; |
| 1870 | BOOL done; |
| 1871 | |
| 1872 | if (na->unused_runs < 2) { |
| 1873 | ntfs_log_error("No unused runs for compressed close\n"); |
| 1874 | errno = EIO; |
| 1875 | return (-1); |
| 1876 | } |
| 1877 | if (*update_from < 0) { |
| 1878 | ntfs_log_error("Bad update vcn for compressed close\n"); |
| 1879 | errno = EIO; |
| 1880 | return (-1); |
| 1881 | } |
| 1882 | if (wrl->vcn < *update_from) |
| 1883 | *update_from = wrl->vcn; |
| 1884 | vol = na->ni->vol; |
| 1885 | compression_length = na->compression_block_clusters; |
| 1886 | done = FALSE; |
| 1887 | /* |
| 1888 | * There generally is an uncompressed block at the end of the file, |
| 1889 | * read the full block and compress it |
| 1890 | */ |
| 1891 | inbuf = (char*)ntfs_malloc(na->compression_block_size); |
| 1892 | if (inbuf) { |
| 1893 | start_vcn = (wrl->vcn + (offs >> vol->cluster_size_bits)) |
| 1894 | & -compression_length; |
| 1895 | if (start_vcn < *update_from) |
| 1896 | *update_from = start_vcn; |
| 1897 | to_read = offs + ((wrl->vcn - start_vcn) |
| 1898 | << vol->cluster_size_bits); |
| 1899 | brl = wrl; |
| 1900 | fail = FALSE; |
| 1901 | while (brl->vcn && (brl->vcn > start_vcn)) { |
| 1902 | if (brl->lcn == (LCN)LCN_HOLE) { |
| 1903 | ntfs_log_error("jump back over a hole when closing\n"); |
| 1904 | fail = TRUE; |
| 1905 | errno = EIO; |
| 1906 | } |
| 1907 | brl--; |
| 1908 | } |
| 1909 | if (!fail) { |
| 1910 | /* roffs can be an offset from another uncompressed block */ |
| 1911 | roffs = (start_vcn - brl->vcn) |
| 1912 | << vol->cluster_size_bits; |
| 1913 | if (to_read) { |
| 1914 | got = read_clusters(vol, brl, roffs, to_read, |
| 1915 | inbuf); |
| 1916 | if (got == to_read) { |
| 1917 | written = ntfs_comp_set(na, brl, roffs, |
| 1918 | to_read, inbuf); |
| 1919 | if ((written >= 0) |
| 1920 | /* free the unused clusters */ |
| 1921 | && !ntfs_compress_free(na,brl, |
| 1922 | written + roffs, |
| 1923 | na->compression_block_size + roffs, |
| 1924 | TRUE, update_from)) { |
| 1925 | done = TRUE; |
| 1926 | } else |
| 1927 | /* if compression failed, leave uncompressed */ |
| 1928 | if (written == -1) |
| 1929 | done = TRUE; |
| 1930 | } |
| 1931 | } else |
| 1932 | done = TRUE; |
| 1933 | free(inbuf); |
| 1934 | } |
| 1935 | } |
| 1936 | if (done && !valid_compressed_run(na,wrl,TRUE,"end compressed close")) |
| 1937 | done = FALSE; |
| 1938 | return (!done); |
| 1939 | } |
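
/*
 * Usage note (added for illustration): as stated in the header comment,
 * a caller closing a file written compressed must make sure two spare
 * runlist slots are available (na->unused_runs >= 2) before calling
 * ntfs_compressed_close(), since compressing the last partial block may
 * split or merge runs; the function returns zero on success and
 * nonzero on failure.
 */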