/*
    Copyright (C) 1998, 1999, 2000, 2001, 2007 Free Software Foundation, Inc.

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
*/

#include <config.h>
#include <string.h>

#include "fat.h"

#ifndef DISCOVER_ONLY
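/* Returns non-zero if fragment FRAG of the old file system must be copied
 * to a new spot: directory fragments always move, and file fragments move
 * when they cannot keep their position on the new file system.
 */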
static int
needs_duplicating (const FatOpContext* ctx, FatFragment frag)
{
        FatSpecific*    old_fs_info = FAT_SPECIFIC (ctx->old_fs);
        FatCluster      cluster = fat_frag_to_cluster (ctx->old_fs, frag);
        FatClusterFlag  flag;

        PED_ASSERT (cluster >= 2 && cluster < old_fs_info->cluster_count + 2,
                    return 0);

        flag = fat_get_fragment_flag (ctx->old_fs, frag);
        switch (flag) {
        case FAT_FLAG_FREE:
                return 0;

        case FAT_FLAG_DIRECTORY:
                return 1;

        case FAT_FLAG_FILE:
                return fat_op_context_map_static_fragment (ctx, frag) == -1;

        case FAT_FLAG_BAD:
                return 0;
        }

        return 0;
}
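/* Advances ctx->buffer_offset to the next fragment that needs duplicating.
 * Returns 0 when the end of the old file system is reached.
 */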
static int
search_next_fragment (FatOpContext* ctx)
{
        FatSpecific*    fs_info = FAT_SPECIFIC (ctx->old_fs);

        for (; ctx->buffer_offset < fs_info->frag_count; ctx->buffer_offset++) {
                if (needs_duplicating (ctx, ctx->buffer_offset))
                        return 1;
        }
        return 0;       /* all done! */
}
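/* Reads LENGTH fragments, starting at ctx->buffer_offset, into the old file
 * system's buffer.  Tries one fast multi-fragment read first, and falls back
 * to reading the marked fragments one at a time if that fails.
 */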
static int
read_marked_fragments (FatOpContext* ctx, FatFragment length)
{
        FatSpecific*    fs_info = FAT_SPECIFIC (ctx->old_fs);
        int             status;
        FatFragment     i;

        ped_exception_fetch_all ();
        status = fat_read_fragments (ctx->old_fs, fs_info->buffer,
                                     ctx->buffer_offset, length);
        ped_exception_leave_all ();
        if (status)
                return 1;

        ped_exception_catch ();

        /* something bad happened, so read fragments one by one.  (The error
           may have occurred on an unused fragment: who cares) */
        for (i = 0; i < length; i++) {
                if (ctx->buffer_map [i]) {
                        if (!fat_read_fragment (ctx->old_fs,
                                fs_info->buffer + i * fs_info->frag_size,
                                ctx->buffer_offset + i))
                                return 0;
                }
        }

        return 1;
}
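/* Marks, in ctx->buffer_map, every fragment in the current window that needs
 * duplicating, then reads the marked fragments into the buffer.  Returns the
 * length of the buffer prefix that covers all marked fragments, or 0 on
 * error.
 */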
static FatFragment
fetch_fragments (FatOpContext* ctx)
{
        FatSpecific*    old_fs_info = FAT_SPECIFIC (ctx->old_fs);
        FatFragment     fetch_length = 0;
        FatFragment     frag;

        for (frag = 0; frag < ctx->buffer_frags; frag++)
                ctx->buffer_map [frag] = -1;

        for (frag = 0;
             frag < ctx->buffer_frags
                && ctx->buffer_offset + frag < old_fs_info->frag_count;
             frag++) {
                if (needs_duplicating (ctx, ctx->buffer_offset + frag)) {
                        ctx->buffer_map [frag] = 1;
                        fetch_length = frag + 1;
                }
        }

        if (!read_marked_fragments (ctx, fetch_length))
                return 0;

        return fetch_length;
}
/*****************************************************************************
 * here starts the write code.  All assumes that ctx->buffer_map [first] and
 * ctx->buffer_map [last] are occupied by fragments that need to be duplicated.
 *****************************************************************************/
/* finds the first fragment that is not going to get overwritten (that needs
   to get read in) */
static FatFragment
get_first_underlay (const FatOpContext* ctx, int first, int last)
{
        int             old;
        FatFragment     new;

        PED_ASSERT (first <= last, return 0);

        new = ctx->buffer_map [first];
        for (old = first + 1; old <= last; old++) {
                if (ctx->buffer_map [old] == -1)
                        continue;
                new++;
                if (ctx->buffer_map [old] != new)
                        return new;
        }
        return -1;
}
/* finds the last fragment that is not going to get overwritten (that needs
   to get read in) */
static FatFragment
get_last_underlay (const FatOpContext* ctx, int first, int last)
{
        int             old;
        FatFragment     new;

        PED_ASSERT (first <= last, return 0);

        new = ctx->buffer_map [last];
        for (old = last - 1; old >= first; old--) {
                if (ctx->buffer_map [old] == -1)
                        continue;
                new--;
                if (ctx->buffer_map [old] != new)
                        return new;
        }
        return -1;
}
169 /* "underlay" refers to the "static" fragments, that remain unchanged.
170 * when writing large chunks at a time, we don't want to clobber these,
171 * so we read them in, and write them back again. MUCH quicker that way.
174 quick_group_write_read_underlay (FatOpContext* ctx, int first, int last)
176 FatSpecific* new_fs_info = FAT_SPECIFIC (ctx->new_fs);
177 FatFragment first_underlay;
178 FatFragment last_underlay;
179 FatFragment underlay_length;
181 PED_ASSERT (first <= last, return 0);
183 first_underlay = get_first_underlay (ctx, first, last);
184 if (first_underlay == -1)
186 last_underlay = get_last_underlay (ctx, first, last);
188 PED_ASSERT (first_underlay <= last_underlay, return 0);
190 underlay_length = last_underlay - first_underlay + 1;
191 if (!fat_read_fragments (ctx->new_fs,
193 + (first_underlay - ctx->buffer_map [first])
194 * new_fs_info->frag_size,
/* quick_group_write() makes no attempt to recover from errors - just
 * does things fast.  If there is an error, slow_group_write() is
 * called.
 *    Note: we do syncing writes, to make sure there isn't any
 * error writing out.  It's rather difficult recovering from errors
 * further on.
 */
static int
quick_group_write (FatOpContext* ctx, int first, int last)
{
        FatSpecific*    old_fs_info = FAT_SPECIFIC (ctx->old_fs);
        FatSpecific*    new_fs_info = FAT_SPECIFIC (ctx->new_fs);
        int             active_length;
        int             i;
        int             offset;

        PED_ASSERT (first <= last, return 0);

        ped_exception_fetch_all ();
        if (!quick_group_write_read_underlay (ctx, first, last))
                goto error;

        for (i = first; i <= last; i++) {
                if (ctx->buffer_map [i] == -1)
                        continue;

                offset = ctx->buffer_map [i] - ctx->buffer_map [first];
                memcpy (new_fs_info->buffer + offset * new_fs_info->frag_size,
                        old_fs_info->buffer + i * new_fs_info->frag_size,
                        new_fs_info->frag_size);
        }

        active_length = ctx->buffer_map [last] - ctx->buffer_map [first] + 1;
        if (!fat_write_sync_fragments (ctx->new_fs, new_fs_info->buffer,
                                       ctx->buffer_map [first], active_length))
                goto error;

        ped_exception_leave_all ();
        return 1;

error:
        ped_exception_catch ();
        ped_exception_leave_all ();
        return 0;
}
/* Writes fragments out, one at a time, avoiding errors on redundant writes
 * on damaged parts of the disk we already know about.  If there's an error
 * on one of the required fragments, it gets marked as bad, and a replacement
 * is found.
 */
static int
slow_group_write (FatOpContext* ctx, int first, int last)
{
        FatSpecific*    old_fs_info = FAT_SPECIFIC (ctx->old_fs);
        FatSpecific*    new_fs_info = FAT_SPECIFIC (ctx->new_fs);
        int             i;

        PED_ASSERT (first <= last, return 0);

        for (i = first; i <= last; i++) {
                if (ctx->buffer_map [i] == -1)
                        continue;

                while (!fat_write_sync_fragment (ctx->new_fs,
                                old_fs_info->buffer + i * old_fs_info->frag_size,
                                ctx->buffer_map [i])) {
                        fat_table_set_bad (new_fs_info->fat,
                                           ctx->buffer_map [i]);
                        ctx->buffer_map [i] = fat_table_alloc_cluster
                                                (new_fs_info->fat);
                        if (ctx->buffer_map [i] == 0)
                                return 0;
                }
        }
        return 1;
}
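/* Records, in ctx->remap, the new home of every fragment duplicated in this
 * group, so that later passes can see where each fragment went.
 */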
static int
update_remap (FatOpContext* ctx, int first, int last)
{
        int             i;

        PED_ASSERT (first <= last, return 0);

        for (i = first; i <= last; i++) {
                if (ctx->buffer_map [i] == -1)
                        continue;
                ctx->remap [ctx->buffer_offset + i] = ctx->buffer_map [i];
        }

        return 1;
}
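/* Writes out one group of duplicated fragments: first with the fast bulk
 * path, then fragment-by-fragment if that fails, and finally updates the
 * remap table.
 */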
static int
group_write (FatOpContext* ctx, int first, int last)
{
        PED_ASSERT (first <= last, return 0);

        if (!quick_group_write (ctx, first, last)) {
                if (!slow_group_write (ctx, first, last))
                        return 0;
        }
        if (!update_remap (ctx, first, last))
                return 0;
        return 1;
}
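/* Allocates a new cluster for each fetched fragment in the buffer, and
 * flushes groups of them out with group_write().  A group is split whenever
 * the span of destination fragments would no longer fit in the buffer.
 */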
/* assumes fragment size and new_fs's cluster size are equal */
static int
write_fragments (FatOpContext* ctx)
{
        FatSpecific*    old_fs_info = FAT_SPECIFIC (ctx->old_fs);
        FatSpecific*    new_fs_info = FAT_SPECIFIC (ctx->new_fs);
        int             group_start;
        int             group_end = -1;         /* shut gcc up! */
        FatFragment     mapped_length;
        FatFragment     i;
        FatCluster      new_cluster;

        PED_ASSERT (ctx->buffer_offset < old_fs_info->frag_count, return 0);

        group_start = -1;
        for (i = 0; i < ctx->buffer_frags; i++) {
                if (ctx->buffer_map [i] == -1)
                        continue;

                ctx->frags_duped++;

                new_cluster = fat_table_alloc_cluster (new_fs_info->fat);
                if (!new_cluster)
                        return 0;
                fat_table_set_eof (new_fs_info->fat, new_cluster);
                ctx->buffer_map [i] = fat_cluster_to_frag (ctx->new_fs,
                                                           new_cluster);

                if (group_start == -1)
                        group_start = group_end = i;

                PED_ASSERT (ctx->buffer_map [i]
                                >= ctx->buffer_map [group_start],
                            return 0);

                mapped_length = ctx->buffer_map [i]
                                - ctx->buffer_map [group_start] + 1;
                if (mapped_length <= ctx->buffer_frags) {
                        group_end = i;
                } else {
                        /* ran out of room in the buffer, so write this group,
                         * and start a new one...
                         */
                        if (!group_write (ctx, group_start, group_end))
                                return 0;
                        group_start = group_end = i;
                }
        }

        PED_ASSERT (group_start != -1, return 0);

        if (!group_write (ctx, group_start, group_end))
                return 0;
        return 1;
}
/* default all fragments to unmoved
 */
static void
init_remap (FatOpContext* ctx)
{
        FatSpecific*    old_fs_info = FAT_SPECIFIC (ctx->old_fs);
        FatFragment     i;

        for (i = 0; i < old_fs_info->frag_count; i++)
                ctx->remap[i] = fat_op_context_map_static_fragment (ctx, i);
}
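/* Counts the fragments that will have to be duplicated, so the progress
 * timer can report a meaningful fraction.
 */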
static FatFragment
count_frags_to_dup (FatOpContext* ctx)
{
        FatSpecific*    fs_info = FAT_SPECIFIC (ctx->old_fs);
        FatFragment     i;
        FatFragment     total = 0;

        for (i = 0; i < fs_info->frag_count; i++) {
                if (needs_duplicating (ctx, i))
                        total++;
        }

        return total;
}
/* duplicates unreachable file clusters, and all directory clusters
 */
int
fat_duplicate_clusters (FatOpContext* ctx, PedTimer* timer)
{
        FatFragment     total_frags_to_dup;

        init_remap (ctx);
        total_frags_to_dup = count_frags_to_dup (ctx);

        ped_timer_reset (timer);
        ped_timer_set_state_name (timer, "moving data");

        ctx->buffer_offset = 0;
        ctx->frags_duped = 0;
        while (search_next_fragment (ctx)) {
                ped_timer_update (
                        timer, 1.0 * ctx->frags_duped / total_frags_to_dup);

                if (!fetch_fragments (ctx))
                        return 0;
                if (!write_fragments (ctx))
                        return 0;
                ctx->buffer_offset += ctx->buffer_frags;
        }

        ped_timer_update (timer, 1.0);
        return 1;
}
#endif /* !DISCOVER_ONLY */