/*
 * fat_fat.c
 *
 * Functions that manipulate the File Allocation Tables.
 */

#include "fat_fat.h"
#include "fat_dentry.h"
#include "fat.h"
#include "../../vfs/vfs.h"
#include <libfs.h>
#include <libblock.h>
#include <errno.h>
#include <byteorder.h>
#include <align.h>
#include <assert.h>
#include <fibril_synch.h>
#include <malloc.h>
#include <mem.h>

/* Number of sectors occupied by the root directory. */
#define RDS(bs) \
    (((sizeof(fat_dentry_t) * RDE((bs))) / BPS((bs))) + \
    (((sizeof(fat_dentry_t) * RDE((bs))) % BPS((bs))) != 0))

/* Size of the system area (reserved sectors, FATs and root directory). */
#define SSA(bs) (RSCNT((bs)) + FATCNT((bs)) * SF((bs)) + RDS(bs))

/* Convert a cluster number and block offset within it to a physical block. */
#define CLBN2PBN(bs, cl, bn) \
    (SSA((bs)) + ((cl) - FAT_CLST_FIRST) * SPC((bs)) + (bn) % SPC((bs)))

/** Mutex protecting the cluster allocator. */
static FIBRIL_MUTEX_INITIALIZE(fat_alloc_lock);

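/** Walk the cluster chain.
 *
 * @param bs            Buffer holding the boot sector for the file system.
 * @param devmap_handle Device handle of the file system.
 * @param firstc        First cluster of the chain to walk.
 * @param lastc         If non-NULL, output argument holding the last cluster
 *                      number visited.
 * @param numc          If non-NULL, output argument holding the number of
 *                      clusters seen during the walk.
 * @param max_clusters  Maximum number of clusters to visit.
 *
 * @return              EOK on success or an error code returned by the block
 *                      layer.
 */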
int
fat_cluster_walk(fat_bs_t *bs, devmap_handle_t devmap_handle,
    fat_cluster_t firstc, fat_cluster_t *lastc, uint16_t *numc,
    uint16_t max_clusters)
{
    block_t *b;
    uint16_t clusters = 0;
    fat_cluster_t clst = firstc;
    int rc;

    if (firstc == FAT_CLST_RES0) {
        /* No space allocated to the file. */
        if (lastc)
            *lastc = firstc;
        if (numc)
            *numc = 0;
        return EOK;
    }

    while (clst < FAT_CLST_LAST1 && clusters < max_clusters) {
        aoff64_t fsec;    /* sector containing the FAT entry for clst */
        unsigned fidx;    /* entry index within that sector */

        assert(clst >= FAT_CLST_FIRST);
        if (lastc)
            *lastc = clst;    /* remember the last cluster visited */
        fsec = (clst * sizeof(fat_cluster_t)) / BPS(bs);
        fidx = clst % (BPS(bs) / sizeof(fat_cluster_t));
        /* read the FAT sector holding the entry for the current cluster */
        rc = block_get(&b, devmap_handle, RSCNT(bs) + fsec,
            BLOCK_FLAGS_NONE);
        if (rc != EOK)
            return rc;
        clst = uint16_t_le2host(((fat_cluster_t *)b->data)[fidx]);
        assert(clst != FAT_CLST_BAD);
        rc = block_put(b);
        if (rc != EOK)
            return rc;
        clusters++;
    }

    if (lastc && clst < FAT_CLST_LAST1)
        *lastc = clst;
    if (numc)
        *numc = clusters;

    return EOK;
}

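/** Read a block from the file identified by a FAT node.
 *
 * The "current" and "last" cluster caches kept in the node are consulted
 * (and updated) so that sequential access does not have to walk the whole
 * cluster chain from the beginning each time.
 *
 * @param block         Output pointer to the read block.
 * @param bs            Buffer holding the boot sector of the file system.
 * @param nodep         FAT node of the file.
 * @param bn            Block number (relative to the beginning of the file).
 * @param flags         Flags passed to libblock.
 *
 * @return              EOK on success or an error code.
 */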
int
fat_block_get(block_t **block, struct fat_bs *bs, fat_node_t *nodep,
    aoff64_t bn, int flags)
{
    fat_cluster_t firstc = nodep->firstc;
    fat_cluster_t currc;
    aoff64_t relbn = bn;
    int rc;

    if (!nodep->size)
        return ELIMIT;

    if (nodep->firstc == FAT_CLST_ROOT)
        goto fall_through;

    if (((((nodep->size - 1) / BPS(bs)) / SPC(bs)) == bn / SPC(bs)) &&
        nodep->lastc_cached_valid) {
        /*
         * This is a request for a block within the last cluster of the
         * file. Use the cached last cluster number instead of walking
         * the cluster chain.
         */
        return block_get(block, nodep->idx->devmap_handle,
            CLBN2PBN(bs, nodep->lastc_cached_value, bn), flags);
    }

    if (nodep->currc_cached_valid && bn >= nodep->currc_cached_bn) {
        /*
         * The requested block lies at or past the cached "current"
         * cluster. Start the cluster walk from the cached cluster
         * rather than from the first cluster of the file.
         */
        firstc = nodep->currc_cached_value;
        relbn -= (nodep->currc_cached_bn / SPC(bs)) * SPC(bs);
    }

fall_through:
    rc = _fat_block_get(block, bs, nodep->idx->devmap_handle, firstc,
        &currc, relbn, flags);
    if (rc != EOK)
        return rc;

    /*
     * Update the "current" cluster cache.
     */
    nodep->currc_cached_valid = true;
    nodep->currc_cached_bn = bn;
    nodep->currc_cached_value = currc;

    return rc;
}

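/** Read a block from a cluster chain identified by its first cluster.
 *
 * @param block         Output pointer to the read block.
 * @param bs            Buffer holding the boot sector of the file system.
 * @param devmap_handle Device handle of the file system.
 * @param fcl           First cluster used by the file (FAT_CLST_ROOT for the
 *                      root directory).
 * @param clp           If non-NULL, output argument holding the cluster from
 *                      which the block was read.
 * @param bn            Block number relative to the beginning of the chain.
 * @param flags         Flags passed to libblock.
 *
 * @return              EOK on success or an error code.
 */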
int
_fat_block_get(block_t **block, fat_bs_t *bs, devmap_handle_t devmap_handle,
    fat_cluster_t fcl, fat_cluster_t *clp, aoff64_t bn, int flags)
{
    uint16_t clusters;
    unsigned max_clusters;
    fat_cluster_t c;
    int rc;

    /*
     * This function can only operate on non-zero length files.
     */
    if (fcl == FAT_CLST_RES0)
        return ELIMIT;

    if (fcl == FAT_CLST_ROOT) {
        /* root directory special case */
        assert(bn < RDS(bs));
        rc = block_get(block, devmap_handle,
            RSCNT(bs) + FATCNT(bs) * SF(bs) + bn, flags);
        return rc;
    }

    max_clusters = bn / SPC(bs);
    rc = fat_cluster_walk(bs, devmap_handle, fcl, &c, &clusters,
        max_clusters);
    if (rc != EOK)
        return rc;
    assert(clusters == max_clusters);

    rc = block_get(block, devmap_handle, CLBN2PBN(bs, c, bn), flags);

    if (clp)
        *clp = c;

    return rc;
}

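/** Fill the gap between the current end of file and a new file position
 * with zeros.
 *
 * @param bs            Buffer holding the boot sector of the file system.
 * @param nodep         FAT node with the gap.
 * @param mcl           First cluster of an independent cluster chain that
 *                      covers the part of the gap beyond the node's last
 *                      allocated cluster.
 * @param pos           Position up to which the gap is to be zeroed out.
 *
 * @return              EOK on success or an error code.
 */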
int fat_fill_gap(fat_bs_t *bs, fat_node_t *nodep, fat_cluster_t mcl,
    aoff64_t pos)
{
    block_t *b;
    aoff64_t o, boundary;
    int rc;

    boundary = ROUND_UP(nodep->size, BPS(bs) * SPC(bs));

    /* zero out already allocated space */
    for (o = nodep->size; o < pos && o < boundary;
        o = ALIGN_DOWN(o + BPS(bs), BPS(bs))) {
        int flags = (o % BPS(bs) == 0) ?
            BLOCK_FLAGS_NOREAD : BLOCK_FLAGS_NONE;
        rc = fat_block_get(&b, bs, nodep, o / BPS(bs), flags);
        if (rc != EOK)
            return rc;
        memset(b->data + o % BPS(bs), 0, BPS(bs) - o % BPS(bs));
        b->dirty = true;
        rc = block_put(b);
        if (rc != EOK)
            return rc;
    }

    if (o >= pos)
        return EOK;

    /* zero out the initial part of the new cluster chain */
    for (o = boundary; o < pos; o += BPS(bs)) {
        rc = _fat_block_get(&b, bs, nodep->idx->devmap_handle, mcl,
            NULL, (o - boundary) / BPS(bs), BLOCK_FLAGS_NOREAD);
        if (rc != EOK)
            return rc;
        memset(b->data, 0, min(BPS(bs), pos - o));
        b->dirty = true;
        rc = block_put(b);
        if (rc != EOK)
            return rc;
    }

    return EOK;
}

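/** Read an entry from one of the FATs.
 *
 * @param bs            Buffer holding the boot sector of the file system.
 * @param devmap_handle Device handle of the file system.
 * @param fatno         Number of the FAT to read from.
 * @param clst          Cluster whose entry is to be read.
 * @param value         Output argument holding the value of the entry.
 *
 * @return              EOK on success or an error code.
 */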
int
fat_get_cluster(fat_bs_t *bs, devmap_handle_t devmap_handle, unsigned fatno,
    fat_cluster_t clst, fat_cluster_t *value)
{
    block_t *b;
    fat_cluster_t *cp;
    int rc;

    rc = block_get(&b, devmap_handle, RSCNT(bs) + SF(bs) * fatno +
        (clst * sizeof(fat_cluster_t)) / BPS(bs), BLOCK_FLAGS_NONE);
    if (rc != EOK)
        return rc;
    cp = (fat_cluster_t *)b->data +
        clst % (BPS(bs) / sizeof(fat_cluster_t));
    *value = uint16_t_le2host(*cp);
    rc = block_put(b);

    return rc;
}

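/** Write an entry into one of the FATs.
 *
 * @param bs            Buffer holding the boot sector of the file system.
 * @param devmap_handle Device handle of the file system.
 * @param fatno         Number of the FAT to write to.
 * @param clst          Cluster whose entry is to be set.
 * @param value         Value to set the entry to.
 *
 * @return              EOK on success or an error code.
 */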
int
fat_set_cluster(fat_bs_t *bs, devmap_handle_t devmap_handle, unsigned fatno,
    fat_cluster_t clst, fat_cluster_t value)
{
    block_t *b;
    fat_cluster_t *cp;
    int rc;

    assert(fatno < FATCNT(bs));
    rc = block_get(&b, devmap_handle, RSCNT(bs) + SF(bs) * fatno +
        (clst * sizeof(fat_cluster_t)) / BPS(bs), BLOCK_FLAGS_NONE);
    if (rc != EOK)
        return rc;
    cp = (fat_cluster_t *)b->data +
        clst % (BPS(bs) / sizeof(fat_cluster_t));
    *cp = host2uint16_t_le(value);
    b->dirty = true;
    rc = block_put(b);
    return rc;
}

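/** Replay the allocation of clusters in all shadow copies of FAT.
 *
 * @param bs            Buffer holding the boot sector of the file system.
 * @param devmap_handle Device handle of the file system.
 * @param lifo          Chain of allocated clusters (in reverse order).
 * @param nclsts        Number of clusters in the lifo chain.
 *
 * @return              EOK on success or an error code.
 */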
int fat_alloc_shadow_clusters(fat_bs_t *bs, devmap_handle_t devmap_handle,
    fat_cluster_t *lifo, unsigned nclsts)
{
    uint8_t fatno;
    unsigned c;
    int rc;

    for (fatno = FAT1 + 1; fatno < bs->fatcnt; fatno++) {
        for (c = 0; c < nclsts; c++) {
            rc = fat_set_cluster(bs, devmap_handle, fatno, lifo[c],
                c == 0 ? FAT_CLST_LAST1 : lifo[c - 1]);
            if (rc != EOK)
                return rc;
        }
    }

    return EOK;
}

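/** Allocate clusters in all copies of FAT.
 *
 * This function attempts to allocate the requested number of clusters in the
 * first FAT instance. The FAT is altered so that the allocated clusters form
 * an independent chain (i.e. a chain which does not belong to any file yet).
 * The shadow copies of FAT are synchronized afterwards.
 *
 * @param bs            Buffer holding the boot sector of the file system.
 * @param devmap_handle Device handle of the file system.
 * @param nclsts        Number of clusters to allocate.
 * @param mcl           Output argument holding the first cluster of the chain.
 * @param lcl           Output argument holding the last cluster of the chain.
 *
 * @return              EOK on success, ENOSPC if there are not enough free
 *                      clusters, or another error code.
 */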
int
fat_alloc_clusters(fat_bs_t *bs, devmap_handle_t devmap_handle, unsigned nclsts,
    fat_cluster_t *mcl, fat_cluster_t *lcl)
{
    block_t *blk;
    fat_cluster_t *lifo;    /* stack of the clusters allocated so far */
    unsigned found = 0;     /* top of the cluster stack */
    unsigned b, c, cl;
    int rc;

    lifo = (fat_cluster_t *) malloc(nclsts * sizeof(fat_cluster_t));
    if (!lifo)
        return ENOMEM;

    /*
     * Search FAT1 sector by sector for unused clusters.
     */
    fibril_mutex_lock(&fat_alloc_lock);
    for (b = 0, cl = 0; b < SF(bs); b++) {
        rc = block_get(&blk, devmap_handle, RSCNT(bs) + b,
            BLOCK_FLAGS_NONE);
        if (rc != EOK)
            goto error;
        for (c = 0; c < BPS(bs) / sizeof(fat_cluster_t); c++, cl++) {
            /*
             * Check whether the entire cluster is physically there.
             * The FAT may describe more clusters than the device can
             * actually hold, so stop the search once a cluster would
             * extend past the total sector count.
             */
            if ((cl >= FAT_CLST_FIRST) &&
                CLBN2PBN(bs, cl, SPC(bs) - 1) >= TS(bs)) {
                rc = block_put(blk);
                if (rc != EOK)
                    goto error;
                goto out;
            }

            fat_cluster_t *clst = (fat_cluster_t *)blk->data + c;
            if (uint16_t_le2host(*clst) == FAT_CLST_RES0) {
                /*
                 * The cluster is free. Push it onto the stack
                 * of found clusters and mark it as non-free.
                 */
                lifo[found] = cl;
                *clst = (found == 0) ?
                    host2uint16_t_le(FAT_CLST_LAST1) :
                    host2uint16_t_le(lifo[found - 1]);
                blk->dirty = true;
                if (++found == nclsts) {
                    /* we are almost done */
                    rc = block_put(blk);
                    if (rc != EOK)
                        goto error;
                    /* update the shadow copies of FAT */
                    rc = fat_alloc_shadow_clusters(bs,
                        devmap_handle, lifo, nclsts);
                    if (rc != EOK)
                        goto error;
                    *mcl = lifo[found - 1];
                    *lcl = lifo[0];
                    free(lifo);
                    fibril_mutex_unlock(&fat_alloc_lock);
                    return EOK;
                }
            }
        }
        rc = block_put(blk);
        if (rc != EOK) {
error:
            fibril_mutex_unlock(&fat_alloc_lock);
            free(lifo);
            return rc;
        }
    }
out:
    fibril_mutex_unlock(&fat_alloc_lock);

    /*
     * We could not find enough free clusters. Undo the allocations made
     * so far.
     */
    while (found--) {
        rc = fat_set_cluster(bs, devmap_handle, FAT1, lifo[found],
            FAT_CLST_RES0);
        if (rc != EOK) {
            free(lifo);
            return rc;
        }
    }

    free(lifo);
    return ENOSPC;
}

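/** Free clusters forming a cluster chain in all copies of FAT.
 *
 * @param bs            Buffer holding the boot sector of the file system.
 * @param devmap_handle Device handle of the file system.
 * @param firstc        First cluster of the chain which is to be freed.
 *
 * @return              EOK on success or an error code.
 */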
int
fat_free_clusters(fat_bs_t *bs, devmap_handle_t devmap_handle,
    fat_cluster_t firstc)
{
    unsigned fatno;
    fat_cluster_t nextc;
    int rc;

    /* Mark all clusters in the chain as free in all copies of FAT. */
    while (firstc < FAT_CLST_LAST1) {
        assert(firstc >= FAT_CLST_FIRST && firstc < FAT_CLST_BAD);
        rc = fat_get_cluster(bs, devmap_handle, FAT1, firstc, &nextc);
        if (rc != EOK)
            return rc;
        for (fatno = FAT1; fatno < bs->fatcnt; fatno++) {
            rc = fat_set_cluster(bs, devmap_handle, fatno, firstc,
                FAT_CLST_RES0);
            if (rc != EOK)
                return rc;
        }

        firstc = nextc;
    }

    return EOK;
}

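/** Append a cluster chain to the last file cluster in all copies of FAT.
 *
 * @param bs            Buffer holding the boot sector of the file system.
 * @param nodep         Node representing the file.
 * @param mcl           First cluster of the cluster chain to append.
 * @param lcl           Last cluster of the cluster chain to append.
 *
 * @return              EOK on success or an error code.
 */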
int
fat_append_clusters(fat_bs_t *bs, fat_node_t *nodep, fat_cluster_t mcl,
    fat_cluster_t lcl)
{
    devmap_handle_t devmap_handle = nodep->idx->devmap_handle;
    fat_cluster_t lastc;
    uint8_t fatno;
    int rc;

    if (nodep->firstc == FAT_CLST_RES0) {
        /* No clusters allocated to the node yet. */
        nodep->firstc = mcl;
        nodep->dirty = true;
    } else {
        if (nodep->lastc_cached_valid) {
            lastc = nodep->lastc_cached_value;
            nodep->lastc_cached_valid = false;
        } else {
            rc = fat_cluster_walk(bs, devmap_handle, nodep->firstc,
                &lastc, NULL, (uint16_t) -1);
            if (rc != EOK)
                return rc;
        }

        for (fatno = FAT1; fatno < bs->fatcnt; fatno++) {
            rc = fat_set_cluster(bs, nodep->idx->devmap_handle,
                fatno, lastc, mcl);
            if (rc != EOK)
                return rc;
        }
    }

    nodep->lastc_cached_valid = true;
    nodep->lastc_cached_value = lcl;

    return EOK;
}

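/** Chop off node clusters in all copies of FAT.
 *
 * @param bs            Buffer holding the boot sector of the file system.
 * @param nodep         FAT node where the chopping takes place.
 * @param lcl           Last cluster which will remain in the node. If this
 *                      argument is FAT_CLST_RES0, then all clusters will be
 *                      chopped off.
 *
 * @return              EOK on success or an error code.
 */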
int fat_chop_clusters(fat_bs_t *bs, fat_node_t *nodep, fat_cluster_t lcl)
{
    int rc;
    devmap_handle_t devmap_handle = nodep->idx->devmap_handle;

    /*
     * Invalidate cached cluster numbers.
     */
    nodep->lastc_cached_valid = false;
    if (nodep->currc_cached_value != lcl)
        nodep->currc_cached_valid = false;

    if (lcl == FAT_CLST_RES0) {
        /* The node will have zero size and no clusters allocated. */
        rc = fat_free_clusters(bs, devmap_handle, nodep->firstc);
        if (rc != EOK)
            return rc;
        nodep->firstc = FAT_CLST_RES0;
        nodep->dirty = true;
    } else {
        fat_cluster_t nextc;
        unsigned fatno;

        rc = fat_get_cluster(bs, devmap_handle, FAT1, lcl, &nextc);
        if (rc != EOK)
            return rc;

        /* Terminate the cluster chain at lcl in all copies of FAT. */
        for (fatno = FAT1; fatno < bs->fatcnt; fatno++) {
            rc = fat_set_cluster(bs, devmap_handle, fatno, lcl,
                FAT_CLST_LAST1);
            if (rc != EOK)
                return rc;
        }

        /* Free the clusters that followed lcl in the chain. */
        rc = fat_free_clusters(bs, devmap_handle, nextc);
        if (rc != EOK)
            return rc;
    }

    /*
     * Update and re-enable the last cluster cache.
     */
    nodep->lastc_cached_valid = true;
    nodep->lastc_cached_value = lcl;

    return EOK;
}

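/** Zero out all blocks of a cluster.
 *
 * @param bs            Buffer holding the boot sector of the file system.
 * @param devmap_handle Device handle of the file system.
 * @param c             Cluster to zero out.
 *
 * @return              EOK on success or an error code.
 */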
int
fat_zero_cluster(struct fat_bs *bs, devmap_handle_t devmap_handle,
    fat_cluster_t c)
{
    int i;
    block_t *b;
    int rc;

    for (i = 0; i < SPC(bs); i++) {
        rc = _fat_block_get(&b, bs, devmap_handle, c, NULL, i,
            BLOCK_FLAGS_NOREAD);
        if (rc != EOK)
            return rc;
        memset(b->data, 0, BPS(bs));
        b->dirty = true;
        rc = block_put(b);
        if (rc != EOK)
            return rc;
    }

    return EOK;
}

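/** Perform basic sanity checks on the file system.
 *
 * Verify that the boot sector fields have sane values and that the media
 * descriptor and FAT signatures look like FAT. This is used to rule out
 * devices which obviously do not contain a FAT file system.
 *
 * @param bs            Buffer holding the boot sector of the file system.
 * @param devmap_handle Device handle of the file system.
 *
 * @return              EOK on success, ENOTSUP if the file system is not
 *                      recognized as FAT, or EIO on I/O failure.
 */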
int fat_sanity_check(fat_bs_t *bs, devmap_handle_t devmap_handle)
{
    fat_cluster_t e0, e1;
    unsigned fat_no;
    int rc;

    /* Check number of FATs. */
    if (bs->fatcnt == 0)
        return ENOTSUP;

    /* Check total number of sectors. */
    if (bs->totsec16 == 0 && bs->totsec32 == 0)
        return ENOTSUP;

    if (bs->totsec16 != 0 && bs->totsec32 != 0 &&
        bs->totsec16 != bs->totsec32)
        return ENOTSUP;

    /* Check media descriptor. Must be between 0xf0 and 0xff. */
    if ((bs->mdesc & 0xf0) != 0xf0)
        return ENOTSUP;

    /* Check number of sectors per FAT. */
    if (bs->sec_per_fat == 0)
        return ENOTSUP;

    /*
     * Check that the root directory entries occupy whole blocks.
     * This check is rather strict, but it allows the root directory and
     * non-root directories to be treated uniformly in some places.
     */
    if ((uint16_t_le2host(bs->root_ent_max) * sizeof(fat_dentry_t)) %
        uint16_t_le2host(bs->bps) != 0)
        return ENOTSUP;

    /* Check signature of each FAT. */
    for (fat_no = 0; fat_no < bs->fatcnt; fat_no++) {
        rc = fat_get_cluster(bs, devmap_handle, fat_no, 0, &e0);
        if (rc != EOK)
            return EIO;

        rc = fat_get_cluster(bs, devmap_handle, fat_no, 1, &e1);
        if (rc != EOK)
            return EIO;

        /* Check that the first byte of FAT contains the media descriptor. */
        if ((e0 & 0xff) != bs->mdesc)
            return ENOTSUP;

        /*
         * Check that the remaining bits of the first two entries are
         * set to one.
         */
        if ((e0 >> 8) != 0xff || e1 != 0xffff)
            return ENOTSUP;
    }

    return EOK;
}