Add an option to force the reader to read all tracks in a group.

David Given
2022-04-23 12:37:52 +02:00
parent a107d4f17f
commit 7e2d300017
2 changed files with 20 additions and 9 deletions


@@ -19,7 +19,7 @@ import "arch/zilogmcz/zilogmcz.proto";
 import "lib/fluxsink/fluxsink.proto";
 import "lib/common.proto";
 
-//NEXT: 29
+//NEXT: 30
 message DecoderProto {
     optional double pulse_debounce_threshold = 1 [default = 0.30,
         (help) = "ignore pulses with intervals shorter than this, in fractions of a clock"];
@@ -61,5 +61,7 @@ message DecoderProto {
         (help) = "how many times to retry each track in the event of a read failure"];
 
     optional string write_csv_to = 23
         [(help) = "if set, write a CSV report of the disk state"];
+    optional bool skip_unnecessary_tracks = 29 [default = true,
+        (help) = "don't read tracks if we already have all necessary sectors"];
 }
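The new field defaults to true, so behaviour is unchanged unless it is cleared. As a minimal sketch (not part of the commit), the field is reached through the usual protoc-generated accessors; the containing message is assumed here to be ConfigProto, matching the config.decoder() call used further down:

    // Sketch only: mutable_decoder() and set_skip_unnecessary_tracks() are the
    // accessors protoc generates for an optional message field and an optional
    // bool field; the ConfigProto name is an assumption.
    static void forceFullGroupReads(ConfigProto& config)
    {
        // Defaults to true, i.e. stop reading a group once all sectors decode.
        if (config.decoder().skip_unnecessary_tracks())
            config.mutable_decoder()->set_skip_unnecessary_tracks(false);
    }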


@@ -27,6 +27,12 @@ enum ReadResult
     BAD_AND_CAN_NOT_RETRY
 };
 
+enum BadSectorsState
+{
+    HAS_NO_BAD_SECTORS,
+    HAS_BAD_SECTORS
+};
+
 /* In order to allow rereads in file-based flux sources, we need to persist the
  * FluxSourceIterator (as that's where the state for which read to return is
  * held). This class handles that. */
@@ -108,8 +114,7 @@ static std::set<std::shared_ptr<const Sector>> collectSectors(
     return sector_set;
 }
 
-/* Returns true if the result contains bad sectors. */
-bool combineRecordAndSectors(
+BadSectorsState combineRecordAndSectors(
     TrackFlux& trackFlux, AbstractDecoder& decoder, const Location& location)
 {
     std::set<std::shared_ptr<const Sector>> track_sectors;
@@ -128,12 +133,12 @@ bool combineRecordAndSectors(
     trackFlux.sectors = collectSectors(track_sectors);
     if (trackFlux.sectors.empty())
-        return true;
+        return HAS_BAD_SECTORS;
 
     for (const auto& sector : trackFlux.sectors)
         if (sector->status != Sector::OK)
-            return true;
+            return HAS_BAD_SECTORS;
 
-    return false;
+    return HAS_NO_BAD_SECTORS;
 }
 
 ReadResult readGroup(FluxSourceIteratorHolder& fluxSourceIteratorHolder,
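Switching the return type from bool to BadSectorsState removes the double negative at the call site: the old code returned true on failure but was tested with a leading !, while the new code reads == HAS_NO_BAD_SECTORS. A self-contained sketch of both sides, with the sector bookkeeping reduced to a list of status flags; everything except the enum itself is a stand-in, not code from the commit:

    #include <vector>

    enum BadSectorsState
    {
        HAS_NO_BAD_SECTORS,
        HAS_BAD_SECTORS
    };

    // Stand-in for combineRecordAndSectors(): a track with no sectors, or any
    // sector flagged bad, counts as having bad sectors.
    static BadSectorsState combine(const std::vector<bool>& sectorOk)
    {
        if (sectorOk.empty())
            return HAS_BAD_SECTORS;
        for (bool ok : sectorOk)
            if (!ok)
                return HAS_BAD_SECTORS;
        return HAS_NO_BAD_SECTORS;
    }

    // Caller's side, mirroring the comparison used in readGroup() below.
    static bool trackIsClean(const std::vector<bool>& sectorOk)
    {
        return combine(sectorOk) == HAS_NO_BAD_SECTORS;
    }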
@@ -163,9 +168,13 @@ ReadResult readGroup(FluxSourceIteratorHolder& fluxSourceIteratorHolder,
         auto trackdataflux = decoder.decodeToSectors(fluxmap, location);
         trackFlux.trackDatas.push_back(trackdataflux);
-        if (!combineRecordAndSectors(trackFlux, decoder, location))
-            return GOOD_READ;
-        if (fluxSourceIterator.hasNext())
+        if (combineRecordAndSectors(trackFlux, decoder, location) == HAS_NO_BAD_SECTORS)
+        {
+            result = GOOD_READ;
+            if (config.decoder().skip_unnecessary_tracks())
+                return result;
+        }
+        else if (fluxSourceIterator.hasNext())
             result = BAD_AND_CAN_RETRY;
     }
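The effect of the new option on readGroup()'s loop, reduced to a self-contained sketch: a clean decode still records GOOD_READ, but the early return now happens only when skip_unnecessary_tracks is left at its default. Apart from the ReadResult values and the option's meaning, everything here is a hypothetical stand-in for the flux-source machinery:

    #include <vector>

    enum ReadResult
    {
        GOOD_READ,
        BAD_AND_CAN_RETRY,
        BAD_AND_CAN_NOT_RETRY
    };

    struct SimulatedRead
    {
        bool decodedCleanly;  // stand-in for combineRecordAndSectors() == HAS_NO_BAD_SECTORS
    };

    static ReadResult readGroupSketch(
        const std::vector<SimulatedRead>& reads, bool skipUnnecessaryTracks)
    {
        ReadResult result = BAD_AND_CAN_NOT_RETRY;
        for (size_t i = 0; i < reads.size(); i++)
        {
            if (reads[i].decodedCleanly)
            {
                result = GOOD_READ;
                if (skipUnnecessaryTracks)
                    return result;  // old behaviour: stop as soon as the track is clean
                // otherwise keep consuming the remaining reads in the group
            }
            else if (i + 1 < reads.size())  // stand-in for fluxSourceIterator.hasNext()
                result = BAD_AND_CAN_RETRY;
        }
        return result;
    }

With the default left at true the loop behaves exactly as before the commit; clearing the option is what lets a caller force every track in a group to be read.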