Compare commits

...

599 Commits
scp ... convert

Author SHA1 Message Date
David Given
8b6073ccbb Try making the error collector non-constexpr? 2025-08-18 22:14:48 +02:00
David Given
f902c759df Try the suggested workaround in lexy for older compilers. 2025-08-18 22:10:06 +02:00
David Given
786636ef5d Don't allow writing Apple 2 flux images to SCP files, because there
isn't space for the quarter-step tracks.
2025-08-17 11:42:34 +02:00
David Given
18bdb27225 fluxengine convert now uses the same syntax as the other tools. 2025-08-17 11:26:16 +02:00
David Given
faca35dec0 Update documentation. 2025-08-17 10:51:50 +02:00
David Given
f8813daae3 Attempt to make work on Windows. 2025-08-17 10:47:54 +02:00
David Given
da5a20390f Fix unhelpful message. 2025-08-17 10:40:34 +02:00
David Given
3ab3db92f5 Add basic support for TI-99 disks. 2025-08-17 10:40:07 +02:00
David Given
a3cd3dd9dc Adjust dependencies. 2025-08-17 09:45:54 +02:00
David Given
918868e9e8 Try updating the Ubuntu version. 2025-08-17 09:43:10 +02:00
David Given
cf05a25445 Does _error_collector need a constexpr constructor and destructor? 2025-08-17 01:01:32 +02:00
David Given
5d5399a267 Add another weirdly missing file. 2025-08-17 00:55:10 +02:00
David Given
2de7af0ba5 Add weirdly missing file. 2025-08-17 00:52:01 +02:00
David Given
0382c304ad Warning fix. 2025-08-17 00:46:50 +02:00
David Given
182d9946fe Add missing file. 2025-08-17 00:40:55 +02:00
David Given
f24e4029b4 Flux sources now add the locations of their data to _extraConfig ---
which is now honoured. Fix a bunch of bugs in some of the flux sources
and sinks. The converter now actually works, maybe.
2025-08-17 00:38:25 +02:00
David Given
4ebda29171 Rename track -> cylinder in lots of places. 2025-08-16 17:39:55 +02:00
David Given
53026f3d02 Rework the way locations are handled to use the new locations
microformat rather than the old RangeProto.
2025-08-16 16:59:44 +02:00
David Given
99c0e95a2f Added a routine for parsing location lists using Lexy. 2025-08-15 23:39:21 +02:00
David Given
dfa56c6b08 Raw import of Lexy. 2025-08-14 23:36:31 +02:00
David Given
7db49aec21 Merge pull request #814 from davidgiven/build
Update ab.
2025-07-28 13:36:21 +02:00
David Given
b5eaec0778 Try more Windows fix? 2025-07-28 12:23:41 +01:00
David Given
06b126a2e7 Typo fix. 2025-07-27 23:20:32 +01:00
David Given
ed96ebac79 Another Windows fix. 2025-07-27 23:08:37 +01:00
David Given
c6e34d2d88 Alternative Windows fix. 2025-07-27 22:50:43 +01:00
David Given
53ac8bad79 Hopefully fix Windows. 2025-07-27 21:43:26 +01:00
David Given
d2e163bc3b More Windows build debugging. 2025-07-27 21:33:52 +01:00
David Given
1404123281 Windows debugging. 2025-07-27 21:20:28 +01:00
David Given
01a7afd28a Merge from master. 2025-07-27 20:48:27 +01:00
David Given
3a42911e6f Update ab. 2025-07-27 20:48:10 +01:00
David Given
8e5d52f2c7 Update ab. 2025-07-24 23:25:41 +02:00
David Given
dfff5d7230 Merge pull request #796 from davidgiven/layout
Fix and expand the layout support to allow libdsk `altback` layouts.
2025-07-21 00:29:10 +02:00
David Given
19b63786c8 Merge from master. 2025-07-21 00:01:41 +02:00
David Given
5293e1c18b Merge pull request #792 from davidgiven/builds
Make an attempt to switch to WSL 1 for better builds.
2025-04-24 21:07:12 +02:00
David Given
f200bb8b00 Update documentation. 2025-04-24 21:06:32 +02:00
David Given
ed11a5c412 Update release build script to match. 2025-04-24 21:04:21 +02:00
David Given
cdcc63f519 Disable the Debian 11 build, as it doesn't work. Try WSL1 again. 2025-04-24 20:49:44 +02:00
David Given
7096e9fd9c Disable Windows verbose builds. 2025-04-24 19:41:46 +02:00
David Given
c8fe56ea95 Switch the sandbox back to hardlinks. 2025-04-24 19:41:38 +02:00
David Given
8a2a58b1a5 Hopefully beat the OSX build into working. 2025-04-24 01:16:54 +02:00
David Given
42aec98368 Add missing file. 2025-04-22 23:11:43 +02:00
David Given
6d73371a79 Update ab. 2025-04-22 23:10:51 +02:00
David Given
4d60ff8e67 Update ab. 2025-03-20 02:11:10 +01:00
David Given
311ff4a89f Add in some missing dependencies. 2025-03-19 03:01:10 +01:00
David Given
5d57957a6e Add missing dependency. 2025-03-18 01:19:58 +01:00
David Given
f89adce02d Add missing file. 2025-03-18 01:08:09 +01:00
David Given
3e505f47bc It now builds properly! 2025-03-18 01:05:07 +01:00
David Given
06e29142e6 Arch files are now built as one library per subdirectory, and everything
is autodetected.
2025-03-18 00:37:07 +01:00
David Given
15a69f6dcb Make build with the new ab --- but the tests fail. 2025-03-17 22:33:54 +01:00
David Given
0f763fe06b Patch up for the libfmt change and update to c++20.
2025-03-12 01:22:18 +01:00
David Given
f5adb89338 Upgrade dep/fmt to 11.1.4. 2025-03-12 01:07:17 +01:00
Märt Põder
36b120bdbe Add Juku 5104 floppies with a new filesystem_track_order option 2024-11-30 15:31:24 +02:00
David Given
cc169d414f Add experimental support for libdsk 'altback' mode layouts. 2024-11-29 22:39:34 +01:00
David Given
0fcb2075e0 Move filesystem_track_ordering from ImageReaderProto/ImageWriterProto to
ImgInputOutputProto; it now only applies to img files. Make it honour the
appropriate track layout setting too.
2024-11-29 22:30:33 +01:00
David Given
2bda78fb40 Distinguish between filesystem track ordering and image track ordering
(although currently only the filesystem ordering is used).
2024-11-29 22:07:58 +01:00
David Given
e878c6eef6 Remove the unused sector_order field from FilesystemProto. 2024-11-29 21:24:32 +01:00
David Given
9ce405cec5 Remove the broken install rule. 2024-11-24 23:10:48 +01:00
David Given
f064d413b3 Add a docker test for Manjaro Linux. 2024-11-24 22:50:44 +01:00
David Given
e5a3331f24 Enable debug tracing. 2024-11-22 21:44:56 +01:00
David Given
6f99f88b29 Merge from master. 2024-11-22 21:22:24 +01:00
David Given
8ff0153708 Add fedora 40 docker test. 2024-11-22 21:21:05 +01:00
David Given
c7273c06da Add docker tests for Fedora 41. 2024-11-22 21:17:34 +01:00
David Given
cd36caccc7 Warning fix. 2024-11-22 20:34:00 +01:00
David Given
a022aab28a Change the wx library order. 2024-11-09 20:52:47 +01:00
David Given
949e9c216d No, we need to stick with WSL2. 2024-11-09 19:35:03 +01:00
David Given
3fcf7d4e69 More adjust. 2024-11-09 19:11:00 +01:00
David Given
e335621558 Adjust. 2024-11-09 19:04:50 +01:00
David Given
9a0357c67b Fix filename. 2024-11-09 18:56:30 +01:00
David Given
0953039369 Try using WSL 1 with Fedora 41. 2024-11-09 18:52:34 +01:00
David Given
d4a8eb5847 More. 2024-11-09 18:19:14 +01:00
David Given
d48ab7c84e Adjust Docker setup script. 2024-11-09 18:00:15 +01:00
David Given
c43b88ac0b Try doing a Debian 11 build on github. 2024-11-09 17:59:02 +01:00
David Given
76ffbb96ba Remember to update the autorelease script for the new Fedora rpmsphere. 2024-11-09 14:05:25 +01:00
David Given
b6b28c8a02 Merge pull request #791 from davidgiven/docker
Make work on Debian 11.
2024-11-09 13:37:27 +01:00
David Given
a736e1da05 Upgrade rpm sphere to release 40. 2024-11-09 00:35:28 +01:00
David Given
a8cc280574 Looks like we need special options for OSX. 2024-11-08 22:15:35 +01:00
David Given
da9d9385b9 Another try at fixing the custom formatter. 2024-11-08 21:32:59 +01:00
David Given
149e5c6fba Make work on Debian 11. Add some Docker tests to make sure it keeps working on
Debian 11.
2024-11-08 21:07:17 +01:00
David Given
e14da81b48 Merge pull request #790 from davidgiven/ab
Update ab. Again.
2024-11-07 21:20:43 +01:00
David Given
49a0a6fdb3 Fix things caused by the ab upgrade. 2024-11-07 21:16:27 +01:00
David Given
da678dc52d Update ab. 2024-11-07 21:07:20 +01:00
David Given
6ff68f3f06 Update ab. 2024-11-07 21:06:56 +01:00
David Given
33feda7208 Update ab. 2024-10-23 14:34:38 +02:00
David Given
38af98ec9b Reformat. 2024-10-19 18:02:16 +02:00
David Given
d6a11d7164 Merge pull request #786 from davidgiven/refactor
Even more refactoring.
2024-10-19 17:32:44 +02:00
David Given
33d241c33e Move common.proto into config. 2024-10-19 16:58:44 +02:00
David Given
944ac97ba4 Move layout proto into config. 2024-10-19 16:44:02 +02:00
David Given
bb6e8dc6b2 Move the drive proto into config. 2024-10-19 16:34:34 +02:00
David Given
ed604f6284 Finally eliminate the +lib target. 2024-10-19 13:18:28 +02:00
David Given
3c67a67ae7 Merge pull request #785 from davidgiven/ab
Update ab.
2024-10-19 00:49:14 +02:00
David Given
f3e7a4e439 Update ab. 2024-10-19 00:27:38 +02:00
David Given
fb5bb874dd Update ab. 2024-10-19 00:19:05 +02:00
David Given
d3ccb4b0d7 Merge pull request #784 from davidgiven/refactor
Even more cleanup.
2024-10-18 21:15:04 +02:00
David Given
d096d7742f Adjust the number of OSX cpus. 2024-10-16 22:57:21 +02:00
David Given
92b5accb90 Tidy rule count. 2024-10-16 22:37:47 +02:00
David Given
1bcc8f45b5 Modularise encoders. Change arch to not depend on +lib. 2024-10-16 22:00:51 +02:00
David Given
5eef01377f Modularise decoders. 2024-10-16 21:52:53 +02:00
David Given
4c140b1b90 Modularise arch. 2024-10-16 21:17:59 +02:00
David Given
4bcbf2b089 Fix bad documentation which got checked in somehow. 2024-10-16 21:17:43 +02:00
David Given
6d3969ab79 Split the dependency so that the encoders/decoders don't depend on arch. 2024-10-16 21:00:48 +02:00
David Given
ea35551b9d Merge. 2024-10-16 20:28:46 +02:00
David Given
0d4c747a05 Merge pull request #783 from davidgiven/osx
More build script fixes.
2024-10-16 17:52:41 +02:00
David Given
6cc7d7b8fe Update build script... again. 2024-10-16 14:22:59 +02:00
David Given
3d0f82e4c6 Update ab. 2024-10-16 14:21:47 +02:00
David Given
26abcbaf81 Adjust OSX scripts... again. 2024-10-16 14:12:09 +02:00
David Given
f7efaa37f5 Merge pull request #782 from davidgiven/osx
Fix OSX builds.
2024-10-16 01:38:34 +02:00
David Given
167bb0287e Fix a stray header. 2024-10-16 01:29:35 +02:00
David Given
3020705012 Modularise imagewriter. 2024-10-16 01:12:35 +02:00
David Given
ecd80775d8 Modularise fluxsource. 2024-10-16 01:00:48 +02:00
David Given
c42e73f17a Modularise fluxsink and usb. 2024-10-16 00:41:57 +02:00
David Given
ec271a67ad Move the flx stuff into external. 2024-10-16 00:00:09 +02:00
David Given
ce4a9cfbd7 OSX 15 runners seem hard to find? 2024-10-15 23:36:58 +02:00
David Given
45f769893c Try and update the build scripts to use a matrix. 2024-10-15 23:07:36 +02:00
David Given
8a23046187 Fix another protocc invocation. 2024-10-15 22:48:48 +02:00
David Given
ca8f2a886e Was this the problem? 2024-10-15 22:45:50 +02:00
David Given
9dc6bdb03b Return of debugging. 2024-10-15 22:37:21 +02:00
David Given
c9b3e5e7e5 Bride of debugging. 2024-10-15 22:32:57 +02:00
David Given
f0ce9c1431 Even more debugging. 2024-10-15 22:12:48 +02:00
David Given
0b22593572 More debugging. 2024-10-15 22:11:06 +02:00
David Given
2fd1115ec9 Add debugging. 2024-10-15 22:05:42 +02:00
David Given
38d4bbdba1 Try upgrading the OSX version on CI. 2024-10-15 21:45:31 +02:00
David Given
cb9325843e Merge pull request #781 from davidgiven/refactor
More refactoring and modularisation.
2024-10-15 21:44:56 +02:00
David Given
900ba11623 Move csvreader into external. Why was it in core? 2024-10-15 00:56:57 +02:00
David Given
7da4e15b73 Move the Kryoflux and Catweasel stuff into external. 2024-10-15 00:52:02 +02:00
David Given
7983a4b883 Move the external file format stuff into its own module. 2024-10-15 00:46:46 +02:00
David Given
ea4a147751 Move fluxmapreader/fluxpattern into data. 2024-10-15 00:00:27 +02:00
David Given
33495e812e Update ab. 2024-10-14 23:29:09 +02:00
David Given
c6e2958665 Update ab. 2024-10-14 23:14:36 +02:00
David Given
86ede224ac Merge pull request #778 from davidgiven/refactor
Even more cleanup refactoring.
2024-10-14 00:35:08 +02:00
David Given
e07a906731 Silence some warnings. 2024-10-13 23:39:03 +02:00
David Given
76d139c29e Remember to add a virtual destructor to LogRenderer. 2024-10-13 23:32:41 +02:00
David Given
4b8487f164 Add missing file. 2024-10-13 23:27:12 +02:00
David Given
a006e0bf6f Overhaul the log stuff to actually work again (better). 2024-10-13 23:26:12 +02:00
David Given
c5373480ba Move a lot of the data-handling utilities into their own module. 2024-10-13 13:35:48 +02:00
David Given
e1f2494966 Create a config module containing all the configuration, proto and flags
stuff.
2024-10-13 11:57:18 +02:00
David Given
9ddfa5a56b Move the logger into core. 2024-10-13 11:39:33 +02:00
David Given
6339cd6b31 Move the routines which render log messages out of logger.cc; this breaks the
dependency which required logger.cc to have access to the entire rest of the
system. Although we still need to forward declare all the log messages.
2024-10-13 11:20:27 +02:00
David Given
7e80e255a4 Config no longer depends on Flux/Image/Encode/Decode, breaking a circular
dependency.
2024-10-12 01:49:13 +02:00
David Given
c6cbae4bc4 Merge pull request #777 from davidgiven/refactor
Lots more refactoring to build in a more modular way (and with a dramatically improved ab).
2024-10-11 22:45:59 +02:00
David Given
68f239765c Update ab. 2024-10-11 21:49:15 +02:00
David Given
77c57d7344 Update ab. 2024-10-09 23:03:35 +02:00
David Given
259c5d67e6 Update ab. 2024-10-09 22:36:42 +02:00
David Given
4c2fa27717 Update ab. 2024-10-09 15:40:01 +02:00
David Given
f5b14cadf8 Use "" to access the fmt headers, not <>, or else Windows can't find the
fallback library.
2024-10-09 15:10:22 +02:00
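The commit above relies on how the preprocessor searches for headers: a quoted include additionally looks in the directory of the including file before the normal include paths, so a bundled fallback copy of fmt is found even when no system-wide fmt exists. A minimal sketch of the distinction, assuming the fallback headers sit somewhere the sources can see:

```cpp
// Sketch only; the exact location of the fallback fmt copy is an assumption.
// The quoted form also searches relative to the including file (plus the
// project's include paths), so a bundled fmt/format.h is always resolvable.
#include "fmt/format.h"

// The angle form skips the including file's directory, which is why a
// Windows build with no installed fmt cannot resolve this:
// #include <fmt/format.h>

int main()
{
    fmt::print("using fmt {}\n", FMT_VERSION);
}
```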
David Given
e154e41bc0 Alternate flag for thin archives. 2024-10-09 01:49:35 +02:00
David Given
1f2ebe8c33 Fixup due to bad intermediate files... 2024-10-09 01:37:15 +02:00
David Given
28444b7907 imagewriter protos built separately. 2024-10-09 01:30:28 +02:00
David Given
75f41fc630 imagereader proto built separately. 2024-10-09 01:28:07 +02:00
David Given
d091479f29 decoders protos now built separately. 2024-10-09 01:25:54 +02:00
David Given
665ceb9ea6 Add missing file. 2024-10-08 23:24:07 +02:00
David Given
27ab1e3e5a Refactor a lot of the proto stuff. 2024-10-08 23:18:30 +02:00
David Given
a777470cda Update ab. 2024-10-02 19:46:50 +02:00
David Given
f48d1c5196 Add progress info to MKDOC. 2024-10-02 11:38:24 +02:00
David Given
cee893b845 Merge pull request #774 from davidgiven/refactor
Do a lot of badly needed refactoring so that things aren't built as one huge library.
2024-10-02 11:34:24 +02:00
David Given
07ccb6319a Tweak dependencies. 2024-10-02 01:16:38 +02:00
David Given
a6981ff7ef Split the FluxmapReader and FluxPattern files. 2024-10-02 00:42:06 +02:00
David Given
fc8d0283b1 Remove the applesauce.cc and test, as it's not used any more. 2024-10-02 00:28:04 +02:00
David Given
5e892e8fc3 Fixup for Applesauce. 2024-10-02 00:23:39 +02:00
David Given
8b124e5ced Merge. 2024-10-01 23:58:48 +02:00
David Given
65dc707c29 Merge pull request #773 from davidgiven/applesauce
Add basic support for Applesauce hardware.
2024-10-01 23:52:37 +02:00
David Given
65fb3e9e76 Detect index marks and interleave them into the flux stream. 2024-10-01 22:26:02 +02:00
David Given
3a0c02459d Add documentation for the Applesauce. 2024-10-01 21:50:33 +02:00
David Given
33f3c97b03 Crudely hack in a lock against writing, as I'll have to wait for the v3
firmware.
2024-10-01 21:30:53 +02:00
David Given
c55032531c Move the VFS stuff into its own library. 2024-10-01 21:06:14 +02:00
David Given
12f999e9e4 Move hexdump into core. 2024-10-01 21:00:22 +02:00
David Given
817cf03592 Fixup after last checkin. 2024-10-01 20:55:02 +02:00
David Given
ca940d1599 Refactor core libraries into their own library. 2024-10-01 20:36:53 +02:00
David Given
d00fba02a0 Rename greaseWeazle to greaseweazle (as it needs doing). 2024-09-30 22:03:17 +02:00
David Given
045e5e7303 Rename write_clock to write_clock_ns, because it is. Tweak the default value
for correctness.
2024-09-30 22:02:09 +02:00
David Given
6fd98e3c67 Writing with the Applesauce now mostly works, although there's still stuff to
be dealt with.
2024-09-29 23:40:19 +02:00
David Given
98279e44ef Remove stray logging. 2024-09-29 23:39:59 +02:00
David Given
22b78cee56 Report which flag could not be recognised on error. 2024-09-29 23:39:32 +02:00
David Given
8f7203e38b Fix testpattern: to actually work. 2024-09-29 23:12:02 +02:00
David Given
38b8cd2574 Reads mostly work; writes not yet. 2024-09-29 13:05:46 +02:00
David Given
6b28f36b97 getVersion() doesn't need to be a public USB method. 2024-09-28 23:44:33 +02:00
David Given
d394b21920 Got our first track read! 2024-09-28 23:39:30 +02:00
David Given
14255a5076 Successfully read raw Applesauce data. 2024-09-28 23:07:05 +02:00
David Given
8fa1a887ce Initial work towards Applesauce support --- you can connect to the device and
test bandwidth.
2024-09-28 20:04:54 +02:00
David Given
fb6fa969a8 Update release script. 2024-09-28 17:05:41 +02:00
David Given
2840831017 Initial boilerplate for the Applesauce. 2024-09-28 17:03:59 +02:00
David Given
a4a83c6cfc Merge pull request #770 from davidgiven/ab
Update ab.
2024-09-28 16:55:52 +02:00
David Given
2c508cf51d Give artifacts unique names. 2024-09-28 16:08:59 +02:00
David Given
e02aa00d07 WSL2 works, so use it for the release script. 2024-09-28 13:35:48 +02:00
David Given
dc384c3635 Pre-release doesn't work. 2024-09-28 12:44:12 +02:00
David Given
69db44d1cf Try WSL 2. 2024-09-28 12:42:50 +02:00
David Given
6fdc9a252a Update ab. 2024-09-28 12:35:22 +02:00
David Given
11166a3c5c Try using the prerelease wsl. 2024-09-28 12:19:00 +02:00
David Given
3b2a3c6e3a Make the _progress script a bit more robust. 2024-09-28 12:12:58 +02:00
David Given
d890383ad2 Update ab. 2024-09-28 12:07:10 +02:00
David Given
4c4b6ee045 Try setting WSL1 explicitly. 2024-09-19 16:31:31 +02:00
David Given
a55196e7e5 Update ab. 2024-09-19 16:15:27 +02:00
David Given
866d5a2933 Update ab. 2024-09-19 00:55:45 +02:00
David Given
8cba89722b Mutter mutter. 2024-09-12 00:19:11 +02:00
David Given
6d1c623716 Typo fix. 2024-09-12 00:17:26 +02:00
David Given
a8c7ffc77d Update Fedora WSL. 2024-09-12 00:15:16 +02:00
David Given
fb05b6ac6d Merge pull request #769 from davidgiven/ab
Update ab.
2024-09-11 23:17:11 +02:00
David Given
0e83b2e7df Try building with macos-13. 2024-09-11 21:37:22 +02:00
David Given
1baaa4402d Merge. 2024-09-10 21:24:50 +02:00
David Given
2af61e4aca Update ab. 2024-09-10 21:22:17 +02:00
David Given
db235dae5e Merge. 2024-09-10 21:21:54 +02:00
David Given
241878bd0e New fmt requires a slight API change. 2024-09-10 21:12:58 +02:00
David Given
1386e343ec New fmt requires a slight API change. 2024-09-10 21:12:58 +02:00
David Given
9ff51ec8ef Update CI script. 2024-09-10 19:53:44 +02:00
David Given
45036b708f Update CI script. 2024-09-10 19:53:44 +02:00
David Given
ec3b5b10df Adjust default Brother head_bias, and fix the documentation.
Fixes: #768
2024-09-10 18:32:03 +02:00
David Given
4817298dbb Update ab. 2024-09-10 17:55:45 +02:00
David Given
af0ce4cf35 Update ab. 2024-09-02 23:51:03 +02:00
David Given
3c3d8d080c Merge pull request #763 from davidgiven/protos
Encode all the protos in one go (per library), as it's vastly faster.
2024-08-12 17:43:35 +02:00
David Given
dc6af483a5 Remember to build the drivetypes table. 2024-08-12 17:32:13 +02:00
David Given
9a0b487f4b Remember to build the formats table. 2024-08-12 17:26:28 +02:00
David Given
cac4d1ce86 Encode all the protos in one go (per library), as it's vastly faster. 2024-08-12 12:36:39 +02:00
David Given
7a3a31a929 Merge pull request #759 from davidgiven/a2r
Improve the A2R writer.
2024-07-31 23:45:51 +02:00
David Given
eee6f95b15 Typo fix. 2024-07-31 13:48:06 +02:00
David Given
7a3d10451d Rework the A2R writer to be a bit less broken. 2024-07-30 22:54:59 +02:00
David Given
e4f1a5a06f Merge pull request #752 from davidgiven/tartu
Add encoder support for the Tartu format.
2024-05-14 21:48:36 +02:00
David Given
500fcde21b Merge. 2024-05-14 21:41:30 +02:00
David Given
eb363a4b2a Update Tartu documentation. 2024-05-14 21:40:50 +02:00
David Given
8a78e609b0 And fix everywhere else... 2024-05-13 23:41:37 +02:00
David Given
15c67b8cc1 Bash into workingness on OSX. 2024-05-13 23:27:53 +02:00
David Given
00e9c5a07f Add support for updating file metadata (only the SRA bits, really). 2024-05-13 21:44:58 +02:00
David Given
7643457374 Add support for renaming files. 2024-05-13 21:12:42 +02:00
David Given
78d5584e21 Add create, put and delete support to the CP/M filesystem driver. 2024-05-13 00:32:57 +02:00
David Given
1d1143a893 Merge from master. 2024-05-10 00:19:57 +02:00
David Given
91093e1304 Merge pull request #754 from davidgiven/greaseweazle
Correctly twiddle DTR on Linux/OSX when changing baud rates.
2024-05-01 19:11:57 +02:00
David Given
1175a06f3d Merge from master. 2024-05-01 16:23:59 +02:00
David Given
6e5abd1189 Merge from master. 2024-05-01 16:23:38 +02:00
David Given
34f97384e7 Merge pull request #753 from davidgiven/osx
Fix OSX build problems.
2024-05-01 16:23:09 +02:00
David Given
653a6a0189 Be more consistent about DTR toggling (needed to reset serial devices). 2024-05-01 12:54:22 +02:00
David Given
f0b1b61eac Merge pull request #749 from p-j-b/fix-hang-windows-adafruit-floppy-greaseweazle
Set DTR after calling SetCommState
2024-05-01 12:46:08 +02:00
David Given
c0fd121bdf Restore build script to normal. 2024-05-01 00:25:41 +02:00
David Given
b805b86ddb Fix truncate arg ordering because of stupid OSX. 2024-05-01 00:25:10 +02:00
David Given
654e7e750c Fix truncate arg ordering because of stupid OSX. 2024-05-01 00:25:10 +02:00
David Given
7501fcfe8b Looks like compiling protobuf files now requires access to the protobuf
libraries.
2024-05-01 00:18:18 +02:00
David Given
fdb7837e03 Looks like compiling protobuf files now requires access to the protobuf
libraries.
2024-05-01 00:18:18 +02:00
David Given
1c57cea483 Try and debug the OSX build failure. 2024-05-01 00:00:30 +02:00
David Given
0c8e8d4d69 Remember to mark the 40-track format as being such. 2024-04-30 23:09:45 +02:00
David Given
8876aae2cc Calculate gaps in bits, not bytes (more accurate). Pad the end of the track to
avoid weirdness reading the last sector.
2024-04-30 23:09:30 +02:00
David Given
3e053b32e2 Display a useful command to repeat a test if one fails. 2024-04-30 23:07:10 +02:00
David Given
0611728537 Don't try to change the build system just yet. 2024-04-30 21:32:58 +02:00
David Given
a84cf83ce5 Add a prototype Tartu encoder. 2024-04-30 00:56:26 +02:00
David Given
c064aa7862 Merge pull request #751 from davidgiven/tartu
Add support for the Tartu Palivere.
2024-04-23 22:21:05 +02:00
David Given
195f7126cc Update link. 2024-04-23 21:58:42 +02:00
David Given
50d466c9c1 Update Tartu documentation. 2024-04-23 21:56:42 +02:00
David Given
5763574634 Update documentation. 2024-04-21 01:18:17 +02:00
David Given
2da568b3e8 Update the Tartu documentation. 2024-04-21 00:35:39 +02:00
David Given
2732d9aec8 Get the Tartu checksums working, and hook up the CP/M filesystem code. 2024-04-21 00:17:11 +02:00
David Given
15d34aff15 Work-in-progress Tartu decoder. 2024-04-20 01:20:49 +02:00
David Given
af3e257c78 Add boilerplate for the Tartu. 2024-04-19 21:10:49 +02:00
p-j-b
c2248c7e4a Added CLRDTR and SETDTR to setBaudRate
Fixes hang in Windows with Adafruit Floppy GreaseWeazle
2024-04-02 13:05:29 +01:00
David Given
a7967b6dc3 More release script tweaks. 2024-03-31 22:50:55 +02:00
David Given
c1f47921e6 Adjust release script. 2024-03-31 22:38:21 +02:00
David Given
cda93d516b Merge pull request #748 from davidgiven/windows
Switch from MSYS builds to WSL/Fedora builds.
2024-03-31 22:31:36 +02:00
David Given
8f9c12b26c Update the release build script. 2024-03-31 22:18:56 +02:00
David Given
cdb4e9ed21 Typo fix. 2024-03-31 22:00:27 +02:00
David Given
1a612c3db5 Pull the .a file out of fallback targets at make time, not at config time. 2024-03-31 21:53:40 +02:00
David Given
72ab957134 Remember to build C++ files with g++. 2024-03-31 21:44:12 +02:00
David Given
621523db62 Try not to build fallback packages unless necessary. 2024-03-31 21:30:26 +02:00
David Given
cdf9cc387c Finally, it works! 2024-03-31 18:42:37 +02:00
David Given
b84ecd289d Again. 2024-03-31 18:25:36 +02:00
David Given
d4cb8f414a Again. 2024-03-31 18:06:19 +02:00
David Given
8180c1f79e Again. 2024-03-31 17:47:16 +02:00
David Given
b23b21a7bf Again. 2024-03-31 17:38:36 +02:00
David Given
b30eff294d Again. 2024-03-31 15:45:54 +02:00
David Given
cacb397198 Again. 2024-03-31 15:36:28 +02:00
David Given
0f8c7d6969 Again. 2024-03-31 15:13:43 +02:00
David Given
7b35232cad Again. 2024-03-31 14:47:44 +02:00
David Given
43624796db Again. 2024-03-31 14:30:52 +02:00
David Given
bc17c624d3 Again. 2024-03-31 14:10:53 +02:00
David Given
61476ea0cc Again. 2024-03-31 13:59:19 +02:00
David Given
f0663030e1 Again. 2024-03-31 13:39:19 +02:00
David Given
989a11931b Add libssp. 2024-03-31 13:27:00 +02:00
David Given
94372a7f09 Again. 2024-03-31 13:12:39 +02:00
David Given
5c2702c6ab Again. 2024-03-31 13:07:07 +02:00
David Given
586c09f5c3 Again. 2024-03-31 12:53:49 +02:00
David Given
7a94123f0d Again. 2024-03-31 12:31:58 +02:00
David Given
4cad34a8a4 Update ccpp.yml 2024-03-31 00:30:14 +01:00
David Given
c0ef3215df Update ccpp.yml 2024-03-30 23:48:40 +01:00
David Given
08982aae7a Update ccpp.yml 2024-03-30 23:34:38 +01:00
David Given
6366ac6639 Update ccpp.yml 2024-03-30 23:08:14 +01:00
David Given
3807d22a78 Update ccpp.yml 2024-03-30 22:57:18 +01:00
David Given
3adbfd7ef5 Update ccpp.yml 2024-03-30 22:52:56 +01:00
David Given
7e40dfa5da Update ccpp.yml 2024-03-30 22:45:15 +01:00
David Given
62ddca2bb4 Update ccpp.yml 2024-03-30 22:44:20 +01:00
David Given
1a59e5065d Update ccpp.yml 2024-03-30 22:36:39 +01:00
David Given
52c0c409e9 Update ccpp.yml 2024-03-30 22:35:28 +01:00
David Given
e74b16d28b Update ccpp.yml 2024-03-30 22:29:37 +01:00
David Given
c2369bcc31 Again. 2024-03-30 22:23:26 +01:00
David Given
2636352a49 Try doing real work. 2024-03-30 23:03:57 +01:00
David Given
222a88f097 Again. 2024-03-30 21:01:14 +01:00
David Given
dbc691726d Again. 2024-03-30 20:55:04 +01:00
David Given
9ff3aedf18 Again. 2024-03-30 20:53:20 +01:00
David Given
8303a1fbca Again. 2024-03-30 20:51:12 +01:00
David Given
0f584ee343 Can we install the Fedora WSL system? 2024-03-30 20:50:18 +01:00
David Given
aafad0a628 Install the rest of the packages. 2024-03-30 20:35:08 +01:00
David Given
9d03951da9 Try installing OpenSUSE packages. 2024-03-30 20:28:53 +01:00
David Given
b15d6cb8e5 Try out a WSL github workflow. 2024-03-30 20:25:42 +01:00
David Given
f9c1816e6f Format. 2024-03-30 19:29:48 +01:00
David Given
d960b020ea Get the executables building on WSL2 Fedora. 2024-03-30 19:14:02 +01:00
David Given
72e9d57b15 Raw import of libfmt. 2024-03-30 14:09:18 +01:00
David Given
968b90dbab Merge pull request #747 from davidgiven/ab
Convert to the latest ab.
2024-03-30 00:35:22 +01:00
David Given
2bccdcc93b Disable more Windows builds. 2024-03-30 00:25:01 +01:00
David Given
ce2a5eb6d9 Disable Windows builds. 2024-03-30 00:23:26 +01:00
David Given
353e4f6210 Try to fix OSX build. 2024-03-30 00:20:56 +01:00
David Given
c115de9d40 Update to build with ab. 2024-03-29 23:53:05 +01:00
David Given
df83b558bf Merge pull request #742 from davidgiven/devices
Don't print the detection banner if no devices were detected.
2024-01-30 22:59:18 +01:00
David Given
7c2b5f116d Don't print the detection banner if no devices were detected. 2024-01-30 22:58:40 +01:00
David Given
30fe75f9bf Merge pull request #741 from davidgiven/devices
Add a command to list detectable devices on the command line.
2024-01-30 22:57:01 +01:00
David Given
401e7a9edb The rpm command now defaults to use real hardware, like it should. 2024-01-30 22:36:16 +01:00
David Given
fd4ddc56f2 Add a command to list detectable devices on the command line. 2024-01-30 21:44:11 +01:00
David Given
83d907bf71 Do parallel builds on OSX. 2024-01-30 21:28:03 +01:00
David Given
327bc76c6e Another try at making OSX builds work. 2024-01-22 22:31:51 +01:00
David Given
fdd39fb2d8 Try to fix OSX builds. 2024-01-22 22:16:49 +01:00
David Given
bfcfa8eb19 Merge pull request #738 from davidgiven/overrides
Fix a whole pile of missing 'override' keywords.
2024-01-22 21:08:27 +01:00
David Given
7095c03e28 Fix a whole pile of missing 'override' keywords. 2024-01-22 20:55:38 +01:00
David Given
45e796f15f Merge pull request #736 from davidgiven/ab
Remove stray files.
2024-01-07 23:20:02 +01:00
David Given
3d1dcd6874 imagemagick is just too much trouble for creating icons, so use png2ico instead. 2024-01-08 00:05:18 +01:00
David Given
0033d0759f 32-bit imagemagick has gone. 2024-01-07 21:29:06 +01:00
David Given
53f7dfe6c9 Remove stray files. 2024-01-07 21:24:53 +01:00
David Given
75446de29b Merge pull request #732 from davidgiven/acorn
Update the acorndfs format so that writes actually work.
2023-12-12 23:45:47 +00:00
David Given
1d119d6921 Update the acorndfs format so that writes actually work. 2023-12-13 00:31:12 +01:00
David Given
7462bd995f Merge pull request #731 from davidgiven/ab
Update ab. You can now build individual tools individually.
2023-12-12 22:31:48 +00:00
David Given
0dd99efad3 Update ab. 2023-12-12 23:17:06 +01:00
David Given
1234e81463 Update ab. You can now build individual tools individually. 2023-12-12 23:11:09 +01:00
David Given
ea13d66e6b Merge pull request #725 from davidgiven/dmk
Add support for DMK directory streams.
2023-11-02 01:35:24 +01:00
David Given
a7cb7eb995 Add missing files... 2023-11-02 01:22:09 +01:00
David Given
29f5feb34d Add support for DMK directory streams. 2023-11-02 01:17:44 +01:00
David Given
5dc60db7b6 When installing files, remember to create the directories. 2023-10-31 00:48:50 +01:00
David Given
fb9f7fe445 Merge pull request #723 from davidgiven/fixes2
Miscellaneous bugfixes.
2023-10-30 23:54:18 +01:00
David Given
a548471652 Add missing files. 2023-10-30 23:35:27 +01:00
David Given
3e47d66644 Put back the standard binaries, tests, install, install-bin makefile targets. 2023-10-30 23:30:18 +01:00
David Given
3bfa45a80c Remember to build with optimisation on. 2023-10-30 23:11:20 +01:00
David Given
2d717af4db The flux and image file pickers can now create new files. 2023-10-30 23:07:36 +01:00
David Given
533b217c8f Eliminate the broken tpi system for a simple drive/format type field. 2023-10-29 21:10:14 +01:00
David Given
ff1fb761f2 Update documentation (last time was wrong). 2023-10-29 21:06:56 +01:00
David Given
95d49add2c Don't show '$formats' for the format list. 2023-10-29 11:18:21 +01:00
David Given
8b75609b70 Update line endings. 2023-10-29 11:18:05 +01:00
David Given
b8929dd589 Fix Windows dependencies. 2023-10-28 13:33:10 +02:00
David Given
2fd29f8786 Merge pull request #720 from davidgiven/build
Rework the build system... again.
2023-10-28 13:17:56 +02:00
David Given
38408820ca Update the release workflow. 2023-10-28 12:52:25 +02:00
David Given
43e6840e78 Try and set the imagemagick time limit? 2023-10-28 00:49:19 +01:00
David Given
15908c52bd Typo fix. 2023-10-27 22:12:46 +01:00
David Given
c90b0e7dc2 Try and fix dependencies again... 2023-10-27 22:10:00 +01:00
David Given
d2ff9806bd Enable build logging. 2023-10-27 21:47:22 +01:00
David Given
1e6993c12d Add missing dependency. 2023-10-27 21:39:55 +01:00
David Given
1122344016 Try to correctly build the manifest this time. 2023-10-27 21:28:47 +01:00
David Given
0dbce00fe4 Try building a Windows manifest. 2023-10-27 21:38:44 +02:00
David Given
5af0b68e06 Add the corpus tests. 2023-10-27 20:43:46 +02:00
dg
6038a11671 Update the README. 2023-10-26 19:31:58 +00:00
dg
dcb92db519 Remove old build system. 2023-10-26 19:29:01 +00:00
dg
dcaeabacc6 --no_warn_duplicate_libraries is apparently too new for github CI... 2023-10-26 19:08:21 +00:00
dg
a2a5c7eff0 Build Windows with all CPUs. 2023-10-26 19:07:43 +00:00
dg
e1cf927bf3 Typo fix. 2023-10-26 18:35:21 +00:00
dg
8fd98d674a Additional windows fixes. 2023-10-26 18:26:10 +00:00
David Given
fd884027c0 Try using the mingw python. 2023-10-26 01:28:58 +02:00
dg
26bd467f79 Make the Windows binaries build. 2023-10-25 21:55:40 +00:00
David Given
c7f22c0dab Build the GUI on OSX. 2023-10-25 22:15:32 +02:00
David Given
92d44f6ae3 Add missing file. 2023-10-25 22:15:09 +02:00
David Given
9143f477b2 Build OSX with all CPUs. 2023-10-25 00:56:45 +02:00
David Given
1a519bf837 Attempt to make build on OSX. 2023-10-25 00:49:39 +02:00
David Given
ca6b90f8c1 Split C and C++ libraries, so that you can use C++ compiler flags. Build with
C++17.
2023-10-24 22:00:09 +02:00
David Given
44fc532d63 Build the documentation. 2023-10-24 00:49:05 +02:00
David Given
6a6cd025c0 Install Python on Windows. 2023-10-24 00:27:04 +02:00
David Given
d769f90704 Increase processor count. 2023-10-23 01:18:33 +02:00
David Given
9d8e3b21ba I think something's wrong with the apt installer. 2023-10-23 01:12:50 +02:00
David Given
dabdfec3e7 Try more setup. 2023-10-23 01:11:15 +02:00
David Given
6a00653d1e Don't use xxd to objectify files. 2023-10-23 01:03:28 +02:00
David Given
8fb786094f Something's wrong with Ubuntu's wx-config setup. 2023-10-23 00:00:29 +02:00
David Given
87e978c817 And again. 2023-10-22 23:10:52 +02:00
David Given
4a31046c9c Adjust dependencies, again... 2023-10-22 23:09:52 +02:00
David Given
db420b3495 Adjust the way packages are detected. 2023-10-22 23:07:20 +02:00
David Given
c81dc166bc Adjust dependencies. 2023-10-22 23:03:44 +02:00
David Given
07aa416975 Make the tests work. 2023-10-22 22:57:54 +02:00
David Given
627820cddc Build the utilities. 2023-10-22 21:35:27 +02:00
David Given
a24fe420c4 We can now build both the CLI and GUI binaries! 2023-10-22 21:20:47 +02:00
David Given
986be921f4 First working command-line executable. 2023-10-22 19:18:14 +02:00
David Given
f5f223f622 First steps towards reworking the build system... again. 2023-10-21 23:02:46 +02:00
David Given
bbdfa0d651 Merge pull request #717 from kristomu/const-correct
Fix const-correct/discards qualifiers error.
2023-09-27 10:08:26 +02:00
K. M
e6bb0cb463 Fix const-correct/discards qualifiers error. 2023-09-27 01:19:17 +02:00
David Given
9e61670116 Merge pull request #656 from davidgiven/psos
Improve pSOS file system handling.
2023-08-20 22:00:51 +02:00
David Given
3876c07164 Merge branch 'master' into psos 2023-08-20 21:42:13 +02:00
David Given
ed315eade9 Merge pull request #668 from davidgiven/ms2000
Add basic support for the MS2000 Microdos file system.
2023-08-19 23:54:27 +02:00
David Given
7456fd0c90 Make the MS2000 stuff work again. Write documentation. 2023-08-19 23:29:55 +02:00
David Given
44160e66ac Merge branch 'master' into ms2000 2023-08-19 22:59:31 +02:00
David Given
9bd969a57b Merge pull request #688 from davidgiven/lif
Add HP9122 support; fix HP9121 support.
2023-08-19 22:55:39 +02:00
David Given
0b585078d8 Merge pull request #704 from ejona86/micropolis-ecc
Micropolis: Add Vector ECC support
2023-08-19 21:54:22 +02:00
David Given
0d495ed934 Merge pull request #710 from davidgiven/usb
Make work on FreeBSD
2023-08-19 21:23:54 +02:00
David Given
95b703b1ea Tidy reporting of USB errors. 2023-08-19 20:46:41 +02:00
David Given
688061397b Adjust error messages. 2023-08-19 20:39:55 +02:00
Poul-Henning Kamp
1f00176455 Make the non-gui executable build on FreeBSD 2023-08-14 19:51:21 +00:00
David Given
90da6b1e72 Merge pull request #706 from ejona86/pkg-config-protobuf
Makefile: Eagerly run pkg-config for protobuf
2023-08-06 01:12:49 +02:00
Eric Anderson
4deb45dc3f Makefile: Eagerly run pkg-config for protobuf
Protobuf added a dependency on absl and now pkg-config is incredibly
slow. `pkg-config --libs protobuf` and `--cflags` each take around 1.5
seconds on my laptop. Running pkg-config only once reduces a 100%
incremental build for 'make all' from 90 seconds to 3.2 seconds.

Unfortunately we will pay the 3 seconds every time we execute make, even
for something that doesn't need protobuf.
2023-08-05 13:02:50 -07:00
David Given
eeec5d106a Update missing file. 2023-08-02 14:08:38 +02:00
David Given
4e42d1d197 Release and ccpp workflows now run in different environments. 2023-08-02 14:08:19 +02:00
David Given
495d08c447 Merge pull request #705 from davidgiven/d20
Update D20 documentation.
2023-08-02 13:46:10 +02:00
David Given
1b859015ae Update documentation. 2023-08-02 13:42:23 +02:00
David Given
3db2109e01 Merge pull request #700 from davidgiven/d20
Add support for the Roland-D20 filesystem.
2023-07-31 23:01:49 +01:00
David Given
294ac87503 Update documentation for the Roland D20 format. 2023-07-31 23:36:45 +02:00
David Given
c297adb0c7 Try to fix Mac builds. 2023-07-31 22:30:52 +02:00
David Given
446b965794 Handle Roland extents properly if the directory entries are in the wrong order.
Deal with block numbers >39 (they go in the bottom of the disk).
2023-07-31 22:20:08 +02:00
Eric Anderson
96d4df296d Micropolis: Add Vector ECC support 2023-07-29 14:03:08 -07:00
David Given
a149aac0e9 Merge pull request #702 from ejona86/micropolis-encodedecode
Micropolis: Fix encoder and decoder to support encodedecodetest
2023-07-29 17:20:58 +01:00
David Given
aacc7be9f3 Merge pull request #703 from ejona86/vgi-hcs
Micropolis: Add missing HCS order for VGI
2023-07-29 17:17:44 +01:00
Eric Anderson
7409955701 Micropolis: Add missing HCS order for VGI 2023-07-29 07:12:35 -07:00
Eric Anderson
c623d95a80 Micropolis: Fix encoder and decoder to support encodedecodetest
These changes should not impact reading/writing from real disks. This
includes a bug fix to Fluxmap where it might miss a trailing interval
when adding bits, as mentioned in #333.

With the Fluxmap bug fixed, the encoder now includes index pulses in its
output. The decoder was relaxed to allow reading precisely one track.

We don't actually add an encodedecodetest for micropolis, though,
because the SCP encoder is unhappy with so many revolutions.
2023-07-29 07:08:23 -07:00
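The trailing-interval bug described above is easy to reproduce in any bits-to-flux conversion: a pulse is only emitted on a 1-bit, so ticks accumulated after the last pulse are silently dropped unless they are flushed at the end. A hedged sketch of that failure mode (the real Fluxmap API differs; the names here are illustrative):

```cpp
// Illustrative only: convert bitcells to flux intervals, emitting a pulse
// (closing the current interval) on each 1-bit.
#include <cstdint>
#include <vector>

std::vector<std::uint32_t> bitsToIntervals(
    const std::vector<bool>& bits, std::uint32_t ticksPerCell)
{
    std::vector<std::uint32_t> intervals;
    std::uint32_t pending = 0;
    for (bool bit : bits)
    {
        pending += ticksPerCell;
        if (bit)
        {
            intervals.push_back(pending); // pulse: close the interval
            pending = 0;
        }
    }
    if (pending != 0)
        intervals.push_back(pending); // the easily-forgotten trailing interval
    return intervals;
}
```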
David Given
1927cc7fe1 Fix issue where trying to rename files by clicking on the tree wasn't working. 2023-07-27 23:44:33 +02:00
David Given
4eca254daf Add support for renaming files. 2023-07-27 23:44:04 +02:00
David Given
c7d4fee3f6 Add support for deleting files. 2023-07-27 23:19:50 +02:00
David Given
a6f798ae5b Mangle and demangle filenames. Remember to write the correct extent numbers in
multiextent files.
2023-07-27 23:09:57 +02:00
David Given
c9ae836e52 Add very brittle write support. 2023-07-27 22:49:10 +02:00
David Given
e3ffa63f7f Make sure that the rotational speed is measured even if reads are done through
Browse Disk.
2023-07-27 22:14:48 +02:00
David Given
4ffc2cc1dc Add support for, hopefully, multi-extent files. 2023-07-27 00:30:44 +02:00
David Given
7f9ba14687 Correct erroneous index. 2023-07-26 22:37:56 +02:00
David Given
a24933e272 Merge from master. 2023-07-26 22:33:40 +02:00
David Given
20bdacbecf Add initial support for the Roland-D20 filesystem. 2023-07-26 22:31:20 +02:00
David Given
ab9d6cf5ed Merge pull request #699 from davidgiven/wx
UI improvements
2023-07-25 23:03:02 +01:00
David Given
1f5903a9a0 Don't use std::filesystem; it makes life harder on Windows with its wide
strings.
2023-07-25 23:35:01 +02:00
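The objection above is about encodings: on Windows std::filesystem::path stores wchar_t natively, and converting back to char goes through the active code page, which can damage exactly the filenames a GUI has to display. An illustrative fragment (not FluxEngine code) of where that bites:

```cpp
// Illustrative only: the narrow/wide round trip that makes std::filesystem
// awkward on Windows when the rest of the codebase uses plain std::string.
#include <filesystem>
#include <string>

std::string displayName(const std::filesystem::path& p)
{
    // On Windows, .string() converts through the active ANSI code page and
    // can mangle (or, depending on the implementation, throw for) characters
    // outside it. Sticking with plain std::string paths avoids the round trip.
    return p.filename().string();
}
```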
David Given
bb073b6bb3 Apparently Mingw can't automatically convert from path to string. 2023-07-25 23:23:04 +02:00
David Given
516241f8f5 Replace the image read file picker with a simple one. 2023-07-25 23:11:52 +02:00
David Given
977b6831a0 When reading Kryoflux streams, you can specify any file in the directory and it
will work (as the GUI now forces you to do this).
2023-07-25 22:48:17 +02:00
David Given
c61effb54f Add a file type box to the flux source selection page. 2023-07-25 22:27:09 +02:00
David Given
346d989944 When reading Kryoflux streams, allow the user to specify any file within the
directory and have it work (as that's what the GUI does now).
2023-07-25 22:51:34 +02:00
David Given
60a73c8d1e Add a file type box to the flux source selection page. 2023-07-25 22:27:09 +02:00
dg
e52db4a837 Typo fix. 2023-07-24 20:56:37 +00:00
dg
4e317643bc Try and install compatible versions of protobuf. 2023-07-24 20:53:51 +00:00
David Given
5f520bf375 Merge pull request #698 from davidgiven/zilogmcz
Add support for the ZDOS filesystem for the Zilog MCZ.
2023-07-24 22:16:33 +02:00
David Given
2efe521b3a Update documentation. 2023-07-24 21:48:37 +02:00
David Given
5c21103646 Get the ZDOS filesystem driver working. 2023-07-24 21:46:49 +02:00
David Given
9444696f37 Merge pull request #697 from davidgiven/ro
Allow read-only flux and image in the GUI.
2023-07-24 08:20:39 +02:00
David Given
082fe4e787 Hack in boilerplate for a ZDos filesystem. 2023-07-24 08:18:18 +02:00
David Given
5e13cf23f9 Allow read-only image reader/writers in the GUI. 2023-07-24 07:53:47 +02:00
David Given
8f98a1f557 Consolidate the image constructors in the same way that's been done for the
flux constructors.
2023-07-24 07:50:16 +02:00
David Given
5b21e8798b Allow read-only flux sources in the GUI. 2023-07-24 07:39:59 +02:00
David Given
b9ef5b7db8 Rename all the flux and image types to prefix the enums, due to them being in
the global namespace now.
2023-07-24 02:18:53 +02:00
David Given
9867f8c302 Combine enums for flux source/sink types. config.cc now knows whether they're
read-only, write-only, and read-write.
2023-07-24 00:50:54 +02:00
David Given
315889faf6 Warning fix. 2023-07-23 22:49:23 +02:00
David Given
798e8fee89 Merge pull request #692 from davidgiven/protobuf
Rename the `requires` config field to `prerequisite`
2023-07-08 00:43:15 +02:00
dg
e1c49db329 Use brew --prefix to detect the installation path when copying licenses from
packages.
2023-07-07 22:10:52 +00:00
dg
dae9537472 Warning fixes. 2023-07-07 21:51:24 +00:00
dg
1330d56cdd Fix a bunch of errors caused by changes to libfmt. 2023-07-07 21:32:21 +00:00
David Given
6ce3ce20d0 Remove stray debugging code. 2023-07-07 01:03:31 +02:00
David Given
362c5ee9b0 Rename the requires config field to prerequisite, as requires is about to
become a C++ keyword.
2023-07-07 00:34:03 +02:00
David Given
0f34ce0278 Merge pull request #690 from Deledrius/nsi-fix
Fix incorrect product name in installer.
2023-06-26 14:27:39 +02:00
Joseph Davies
0c27c7c4c8 Fix incorrect product name in installer. 2023-06-25 16:18:03 -07:00
David Given
37595bf73c Update the HP formats to not use the reserved tracks at the end of the disk. 2023-06-15 00:13:28 +02:00
David Given
952aea46ba The HP9122 format appears to be double-sided. 2023-06-13 23:00:00 +02:00
David Given
6a6536cf27 Discover that the HP9121 format is actually 70 track. Add support for the
HP9122 format.
2023-06-13 20:16:41 +02:00
David Given
696368c92a Read LIF volume size information correctly. 2023-06-13 20:08:47 +02:00
David Given
e3edc9327e Don't crash if there is no disk usage data. 2023-06-13 20:08:31 +02:00
David Given
8d2e6a664d Adjust the 264 format to have sector numbers in, hopefully, the right place. 2023-06-13 19:54:46 +02:00
David Given
9db6efe7a2 Merge pull request #686 from davidgiven/docs
Update documentation.
2023-06-03 00:30:34 +02:00
David Given
8b8a22d7fb Add the PCB schematic. 2023-06-03 00:05:51 +02:00
David Given
0a70344bc1 Add Fedora package list. 2023-06-02 23:38:09 +02:00
David Given
e77d01911c Merge pull request #683 from davidgiven/gw
Reset the Greaseweazle data stream when connecting
2023-05-25 22:43:49 +02:00
David Given
d4c0853e1f Reset the Greaseweazle data stream when connecting. 2023-05-25 22:23:28 +02:00
David Given
363a4e959c Finally fix that format error when measuring disk speed. 2023-05-25 22:23:17 +02:00
David Given
9336a04681 Merge pull request #682 from davidgiven/docs
More documentation tweaking.
2023-05-25 22:10:10 +02:00
David Given
214ff38749 Tweak documentation layout. 2023-05-25 22:08:28 +02:00
David Given
a8f3c01d8b Add basic documentation for the extension formats. 2023-05-25 22:06:23 +02:00
David Given
4da6585ef9 Merge pull request #681 from davidgiven/bb679
Allow writing to Greaseweazle disks again by not setting hardSectorThresholdNs to inf.
2023-05-25 21:58:59 +02:00
David Given
df40100feb Merge pull request #680 from davidgiven/docs
Overhaul docs.
2023-05-25 21:40:32 +02:00
David Given
f2d92e93fb Format. 2023-05-25 21:27:49 +02:00
David Given
b4d8d569d2 Allow writing to Greaseweazle disks again by not setting hardSectorThresholdNs
to inf...
2023-05-25 21:26:44 +02:00
David Given
854b3e9c59 Better autogenerated documentation. 2023-05-25 21:14:41 +02:00
David Given
28ca2b72f1 Polishing. 2023-05-25 21:14:32 +02:00
David Given
7781c8179f Typo fix. 2023-05-25 20:20:02 +02:00
David Given
69ece3ffa0 Polish documentation. 2023-05-25 20:07:33 +02:00
David Given
53adcd92ed Spell (and capitalise) Greaseweazle correctly. 2023-05-25 19:50:05 +02:00
David Given
2bef6ca646 Merge pull request #678 from davidgiven/requirements
Overhaul config system and lots of other stuff
2023-05-16 01:29:58 +02:00
dg
bab350d771 Update Ubuntu build version. 2023-05-15 23:09:52 +00:00
dg
048dac223f Enable workflow cancelling when a new one is pushed. 2023-05-15 22:59:59 +00:00
dg
b7634da310 Work around Apple dev kit stupidity (defining BYTE_SIZE in a standard
header...)
2023-05-15 22:51:16 +00:00
dg
882c92c64a Merge. 2023-05-15 22:49:52 +00:00
dg
4592dbe28b Add drive types for the Micropolis drives. 2023-05-15 22:49:15 +00:00
dg
edc0f21ae7 Remove all the requires TPI constraints --- I'm not sure this is a good idea. 2023-05-15 22:48:33 +00:00
dg
8715b7f6c1 Don't crash if no format is selected. 2023-05-15 22:14:06 +00:00
dg
99511910dd If an incoming FL2 file has no TPI, use the default rather than 0 (the default
will probably be zero, but anyway).
2023-05-15 22:00:03 +00:00
dg
a03478b011 Don't store the actual DriveProto in FL2 files, because it makes the proto tags
significant.
2023-05-15 21:59:24 +00:00
dg
5c428e1f07 Don't require the user to specify the drive TPI if they don't want to. 2023-05-15 21:51:05 +00:00
dg
ee57615735 Deal with invalid options in the GUI. 2023-05-15 20:55:33 +00:00
dg
67300e5769 Add the ability to validate the configuration, at least in the CLI; this may
require some refactoring for the GUI to apply cleanly.
2023-05-14 23:18:48 +00:00
dg
873e05051c Massive rework of the config system to be clearer, more robust, and more
flexible. (But it doesn't check options any more.)
2023-05-14 22:04:51 +00:00
dg
4daaec46a7 Greying out of the option buttons now works; but the whole way configs are
handled is pretty unsatisfactory and needs work.
2023-05-13 23:29:34 +00:00
dg
dd8cc7bfd4 Attempt to move the configuration setup logic into Config, so it's centralised. 2023-05-13 12:42:31 +00:00
dg
5568ac382f Eliminate Environment --- we don't use it and Config contains this
functionality.
2023-05-13 00:04:42 +00:00
dg
dcdb3e4455 Encoders and decoders are routed through Config. 2023-05-12 23:58:44 +00:00
dg
17b29b1626 Flux sinks and image writers are routed through Config. 2023-05-12 23:47:09 +00:00
dg
dcfcc6271c Sort out a whole bunch of other things, including cleaning up the way the
verification source is handled.
2023-05-12 23:28:25 +00:00
dg
1d77ba6429 ImageReaders can now contribute config. 2023-05-12 22:20:13 +00:00
dg
ff5f019ac1 Fetching the image reader is now done through Config. 2023-05-12 21:52:53 +00:00
dg
e61eeb8c6f Fetching the flux source is now done through Config. 2023-05-12 21:25:54 +00:00
dg
68d22e7f54 Fix build error. 2023-05-11 23:31:38 +00:00
dg
790f0a42e3 Move setting the image writer into Config. 2023-05-11 23:06:24 +00:00
dg
08e9e508cc Move setting the image reader into Config. 2023-05-11 23:02:05 +00:00
dg
ad1a8d608f Migrate setting the flux sink to Config. 2023-05-11 22:54:32 +00:00
dg
d74ed71023 Move setting the flux source into Config. 2023-05-11 22:47:00 +00:00
dg
0c7f9e0888 Enforce option requirements --- but the config stuff is still kinda broken and
will need rethinking, especially if flux files can carry configs with them.
2023-05-11 21:58:10 +00:00
dg
ba5f6528a8 Move option handling into Config. 2023-05-11 20:37:54 +00:00
dg
65cf552ec2 Some cleanup. 2023-05-11 20:03:25 +00:00
dg
715c0a0c42 Move config file loading into config.cc. 2023-05-11 19:58:16 +00:00
dg
9e383575d1 Any drive settings in the global config will override loaded settings from an
fl2 file.
2023-05-11 19:21:59 +00:00
dg
d84c366480 You can now fetch config fields by path. 2023-05-11 19:03:36 +00:00
dg
42e6c11081 Migrate to a new global config object. 2023-05-10 23:13:33 +00:00
dg
9ba3f90f1e Change the global config variable to a globalConfig() function. 2023-05-10 22:07:17 +00:00
dg
24ff51274b Fix formatting. 2023-05-10 21:14:30 +00:00
dg
4c4c752827 Add missing file. 2023-05-10 21:11:10 +00:00
dg
5022b67e4a Drive information is stored in FL2 files. 2023-05-10 20:47:55 +00:00
dg
6b990a9f51 Overhaul the TPI stuff; now both the drive and the layout have a TPI setting,
which must be set.
2023-05-10 19:58:44 +00:00
dg
e69ce3b8df Merge. 2023-05-10 18:31:42 +00:00
dg
cf537b6222 Add the proto part of option requirements. 2023-05-10 18:29:46 +00:00
David Given
9d1160faff Merge pull request #677 from davidgiven/errors
Clean up error handling.
2023-05-10 01:13:49 +02:00
noreply@github.com
ed4067f194 Merge pull request #677 from davidgiven/errors
Clean up error handling.
2023-05-09 23:13:49 +00:00
dg
d4b55cd8f5 Switch from Logger() to log(). 2023-05-09 22:47:36 +00:00
dg
baaeb0bca7 Fix mangled formatting caused by clang-format. 2023-05-09 21:39:35 +00:00
dg
466c3c34e5 Replace the Error() object with an error() function which takes fmt
formatspecs, making for much cleaner code. Reformatted everything.

This actually happened in multiple steps but then I corrupted my local
repository and I had to recover from the working tree.
2023-05-09 20:59:44 +00:00
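A minimal sketch of the shape such an error() helper can take, assuming it prints and exits; this is illustrative, not the actual FluxEngine implementation:

```cpp
// Hedged sketch: a variadic error() that forwards fmt format specs,
// replacing a stream-style Error() object.
#include <fmt/format.h>
#include <cstdio>
#include <cstdlib>
#include <utility>

template <typename... Args>
[[noreturn]] void error(fmt::format_string<Args...> spec, Args&&... args)
{
    fmt::print(stderr, "error: {}\n",
        fmt::format(spec, std::forward<Args>(args)...));
    std::exit(1);
}

// Usage: error("cannot open '{}': {}", filename, strerror(errno));
```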
dg
099d7969ca Add the drive types dropdown, plus config fragments. Change the TPI settings to
floats (because 40-track 3.5" uses a TPI of 67.5...).
2023-05-08 23:04:52 +00:00
dg
5adfa95a85 Add a preliminary format for the 8050. 2023-05-08 23:03:37 +00:00
David Given
bfa0846ad0 Merge pull request #676 from davidgiven/doc
Correct index table rendering.
2023-05-08 20:38:53 +02:00
dg
7099264334 Correct index table rendering. 2023-05-08 18:37:16 +00:00
David Given
69b44e7968 Merge pull request #674 from davidgiven/doc
Overhaul documentation.
2023-05-08 01:13:57 +01:00
dg
fe39977ff7 Remember to add links to each profile's documentation. 2023-05-07 23:51:55 +00:00
dg
b9fc8de5b5 OSX compatibility. 2023-05-07 23:33:36 +00:00
dg
f7b8022d3a Switch to the traditional unicorn/dinosaur support categorisation. 2023-05-07 23:06:56 +00:00
dg
a62346c515 Add short names to each profile. 2023-05-07 21:49:14 +00:00
dg
e372d757ad Some tidying. 2023-05-07 21:32:36 +00:00
dg
ab1b10f935 Typo fix. 2023-05-07 21:30:09 +00:00
dg
8e918706dc First draft at autogenerating the table in the README. 2023-05-07 21:28:42 +00:00
dg
76450d00bf Tidy. 2023-05-07 19:53:57 +00:00
dg
ee53542e18 Eliminate config includes, as nothing uses them any more and it just makes
things like documentation generation hard.
2023-05-07 19:35:55 +00:00
dg
db004bc787 Preparse ConfigProto objects. 2023-05-07 19:28:29 +00:00
dg
71a7f3554e Remember to actually add the documentation files... 2023-05-07 18:40:24 +00:00
dg
5c3f422a53 First pass at automatic document generation. 2023-05-07 18:36:30 +00:00
dg
2fe0cec04a Copy documentation into the config definitions. 2023-05-07 16:48:17 +00:00
David Given
de59e781b5 Merge pull request #673 from davidgiven/options
Do more options overhauling.
2023-05-07 13:21:28 +01:00
dg
8c77af651b Run corpus tests on other platforms. 2023-05-07 11:56:32 +00:00
dg
638f6928cf Fix checkouts, maybe? 2023-05-07 11:53:56 +00:00
dg
ccc8e597a7 Don't use vformat, as apparently it's problematic. 2023-05-07 11:49:08 +00:00
dg
585f19d884 More fix. 2023-05-07 11:46:30 +00:00
dg
bb2b7d7df6 Typo fix. 2023-05-07 11:45:07 +00:00
dg
e75d218438 Attempt to run the corpus tests on github for Linux. 2023-05-07 11:44:14 +00:00
dg
7f81b554fd Try to decode the test corpus and make sure there were no decode regressions. 2023-05-07 11:37:50 +00:00
dg
2490f19a1a Add a preliminary option linter. Fix the format errors which showed up. 2023-05-07 00:29:21 +00:00
David Given
30f382bf22 Merge pull request #670 from davidgiven/dmf
Support DMF.
2023-05-07 00:15:13 +01:00
dg
ad03c187cf Merge from master. 2023-05-06 22:45:46 +00:00
David Given
06560b5a5a Merge pull request #672 from davidgiven/usb
Upgrade libusbp.
2023-05-06 23:43:37 +01:00
dg
7c40093698 Try to work around weird test failure on Windows. 2023-05-06 22:30:50 +00:00
dg
d37c75d703 Made test failures log to stdout. 2023-05-06 22:15:01 +00:00
dg
82bfb9a303 Upgrade libusbp. 2023-05-06 21:19:07 +00:00
dg
01682101a6 Update documentation. 2023-05-06 19:59:45 +00:00
dg
3c46f787b1 Always do an update when the state changes, because otherwise certain events
get lost.
2023-05-06 19:21:31 +00:00
dg
591d200283 Adjust DMF gaps. 2023-05-06 19:20:32 +00:00
dg
195534c21e Configure the 1680kB DMF format file system. 2023-05-06 18:11:24 +00:00
dg
0f9d851a29 Adjust the DMF format timings to match that of the Microsoft disk image. 2023-05-06 17:26:56 +00:00
dg
18a03baf99 Display object lengths in the flux viewer. 2023-05-06 15:34:44 +00:00
dg
5e06db4a52 Add preliminary DMF support. 2023-05-06 11:02:09 +00:00
David Given
bf78508ef7 Merge pull request #669 from davidgiven/hplif
Do some LIF enhancement.
2023-05-06 11:38:17 +01:00
dg
137c0340fb Fix month, which was off-by-one. Add custom attributes for the other LIF dirent
properties.
2023-05-06 10:20:10 +00:00
dg
e6d9de2d80 Decode timestamps into a custom property. 2023-05-06 10:16:12 +00:00
dg
d9b319eaed Add textual file types (where known) for LIF files. 2023-05-06 10:00:12 +00:00
dg
ba1f8b8ed8 Add missing file. Reformat. 2023-05-06 00:28:13 +00:00
dg
10605b3908 Add a read-only MS2000 file system, and a format (with no encoder or decoder). 2023-05-06 00:21:10 +00:00
dg
e31e547322 Add a routine to count the number of bits in a word. 2023-05-06 00:20:48 +00:00
dg
f2e713bde3 Stop trying to build for OSX 10.15, because it looks like the github runners
have been turned off.
2023-05-05 23:19:44 +00:00
David Given
94e2e494c9 Merge pull request #667 from davidgiven/options
Overhaul the options system.
2023-05-06 00:18:41 +01:00
dg
5af408e1d1 Add missing file. 2023-05-05 23:07:57 +00:00
dg
77bdc727ab Properly handle default options in the CLI. 2023-05-05 22:57:49 +00:00
dg
eb26426424 Consolidate the Victor formats into each other. 2023-05-05 22:29:26 +00:00
dg
f624bb6e5b Consolidate the Mac formats into each other. 2023-05-05 22:24:28 +00:00
dg
4a8fb9288c Remove obsolete file. 2023-05-05 22:16:11 +00:00
dg
f8f5873973 Consolidate (and typo fix) the ampro format. 2023-05-05 22:15:37 +00:00
dg
5f4903f2d1 Rename the commodore1541 options to be a bit more standard. 2023-05-05 22:07:13 +00:00
dg
b02a894663 Consolidate the Brother formats. 2023-05-05 22:03:49 +00:00
dg
510b530551 Consolidate all the IBM formats together. 2023-05-05 21:37:49 +00:00
dg
c36662205b Typo fix. 2023-05-05 21:18:27 +00:00
dg
a2ffe06792 Consolidate the MX formats into each other. 2023-05-05 21:16:26 +00:00
dg
0f56108bf5 Consolidate the Apple II formats together. 2023-05-05 21:11:06 +00:00
dg
199cefdb71 Fix radiobuttons for multiple option groups. 2023-05-05 21:06:57 +00:00
dg
1bdeaa326c Consolidate some Hewlett-Packard LIF disks together. 2023-05-05 20:46:49 +00:00
dg
cce8cfe88d Consolidate the Tiki 100 formats. 2023-05-05 20:36:39 +00:00
dg
bcfc0217dc Consolidate the Northstar formats into each other. 2023-05-05 20:29:45 +00:00
dg
7cfa080220 Merge from master. 2023-05-05 20:23:17 +00:00
dg
45ebc0f40f Consolidate the Micropolis formats into one. 2023-05-05 20:22:55 +00:00
dg
38d575eda7 Remember to set a default format. 2023-05-05 20:18:53 +00:00
dg
9cb284583b Consolidate all the Atari ST formats together. 2023-05-05 20:15:47 +00:00
dg
137b921e8d Consolidate all the Acorn formats together. 2023-05-05 20:07:44 +00:00
dg
8c876f555d Move from option exclusivity groups to option groups, which are better. 2023-05-05 19:55:56 +00:00
David Given
0988dd524b Merge 2dc649ef09 into 51fa3c5293 2023-05-04 21:10:25 +00:00
dg
2dc649ef09 Add read-only support for LIF filesystems. 2023-05-04 21:04:55 +00:00
dg
baf02cb849 Add support for the HPLIF 616kB format (contributed by Eric Rechlin). 2023-05-04 19:12:51 +00:00
David Given
51fa3c5293 Merge pull request #664 from bdwheele/ibmpc-8-sector-formats
Adding IBM PC 8-sector formats
2023-05-02 12:27:15 +01:00
Brian Wheeler
134dd6c37d Adding IBM PC 8-sector formats 2023-05-01 08:24:24 -04:00
David Given
d766e1f9a9 Merge pull request #663 from ejona86/micropolis-200ms
Micropolis: disk rotate period is 200 ms
2023-04-24 13:12:18 +02:00
Eric Anderson
d298f5b16e Micropolis: disk rotate period is 200 ms
The disks are expected to contain 100,000 bitcells, so clock_period_us
and rotational_period_ms need to align.
2023-04-23 13:54:50 -07:00
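The alignment the message describes is just the rotational period divided by the bitcell count. Written out with the field names quoted in the commit message (the compile-time check itself is illustrative):

```cpp
// 200 ms per revolution spread over 100,000 bitcells gives a 2 us bitcell
// clock, so clock_period_us and rotational_period_ms must be set together.
constexpr double rotational_period_ms = 200.0;
constexpr double bitcells_per_revolution = 100000.0;
constexpr double clock_period_us =
    (rotational_period_ms * 1000.0) / bitcells_per_revolution;
static_assert(clock_period_us == 2.0, "clock and rotation period must align");
```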
dg
9484a1b870 Swap minutes and seconds, as this seems to be more correct. 2023-04-07 16:38:08 +00:00
dg
ed634fbbf6 Fix build failure. 2023-04-07 16:20:32 +00:00
dg
4c776d584b Add read support for A2R v2 files. 2023-04-07 15:00:20 +00:00
David Given
c2c04862a2 Merge pull request #662 from davidgiven/scp
Adjust the SCP write logic so an unspecified TPI is treated as 96.
2023-04-07 11:25:00 +02:00
David Given
624c597735 Merge pull request #661 from davidgiven/scp
Fix reading 48tpi SCP files.
2023-04-06 23:51:30 +02:00
dg
0a5a814a88 Typo fix. 2023-04-05 17:17:15 +00:00
dg
08ce455d1d Properly terminate pSOS filenames. Make a guess at the ctime format. 2023-04-05 17:13:49 +00:00
945 changed files with 102801 additions and 35826 deletions

View File

@@ -2,88 +2,123 @@ name: C/C++ CI
on: [push]
concurrency:
group: environment-${{ github.head_ref }}
cancel-in-progress: true
jobs:
build-linux:
runs-on: ubuntu-20.04
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v1
- uses: actions/checkout@v4
with:
repository: 'davidgiven/fluxengine'
path: 'fluxengine'
- uses: actions/checkout@v4
with:
repository: 'davidgiven/fluxengine-testdata'
path: 'fluxengine-testdata'
- name: apt
run: sudo apt update && sudo apt install libudev-dev libsqlite3-dev protobuf-compiler libwxgtk3.0-gtk3-dev libfmt-dev
- name: make
run: CXXFLAGS="-Wp,-D_GLIBCXX_ASSERTIONS" make -j2
build-macos-current:
runs-on: macos-latest
steps:
- uses: actions/checkout@v2
- name: brew
run: brew install sqlite pkg-config libusb protobuf wxwidgets fmt make coreutils dylibbundler libjpeg
- name: make
run: gmake -j2
- name: Upload build artifacts
uses: actions/upload-artifact@v2
with:
name: ${{ github.event.repository.name }}.${{ github.sha }}
path: FluxEngine.pkg
build-macos-10-15:
runs-on: macos-10.15
steps:
- uses: actions/checkout@v2
- name: brew
run: brew install sqlite pkg-config libusb protobuf wxwidgets fmt make coreutils dylibbundler libjpeg
- name: make
run: |
gmake -j2
mv FluxEngine.pkg FluxEngine-10.15.pkg
sudo apt install libudev-dev libsqlite3-dev protobuf-compiler libwxgtk3.0-gtk3-dev libfmt-dev libprotobuf-dev
- name: make
run: CXXFLAGS="-Wp,-D_GLIBCXX_ASSERTIONS" make -j`nproc` -C fluxengine
- name: Upload build artifacts
uses: actions/upload-artifact@v2
#build-linux-debian-11:
# runs-on: ubuntu-22.04
# container: debian:11
# steps:
# - uses: actions/checkout@v4
# with:
# repository: 'davidgiven/fluxengine'
# path: 'fluxengine'
# - uses: actions/checkout@v4
# with:
# repository: 'davidgiven/fluxengine-testdata'
# path: 'fluxengine-testdata'
# - name: apt update
# run: apt update
# - name: apt
# run: >
# apt install -y python3 make xz-utils python3 python3-hamcrest
# protobuf-compiler libprotobuf-dev libsqlite3-dev
# libfmt-dev libprotobuf-dev wx-common pkg-config
# libudev-dev g++ libwxgtk3.0-gtk3-dev
# - name: make
# run: make -C fluxengine
build-macos-current:
strategy:
matrix:
runs-on: [macos-13, macos-latest]
runs-on: ${{ matrix.runs-on }}
steps:
- uses: actions/checkout@v4
with:
name: ${{ github.event.repository.name }}.${{ github.sha }}
path: FluxEngine-10.15.pkg
repository: 'davidgiven/fluxengine'
path: 'fluxengine'
- uses: actions/checkout@v4
with:
repository: 'davidgiven/fluxengine-testdata'
path: 'fluxengine-testdata'
- name: brew
run: |
brew install sqlite pkg-config libusb protobuf wxwidgets fmt make coreutils dylibbundler libjpeg
- name: make
run: gmake -C fluxengine -j2
- name: Upload build artifacts
uses: actions/upload-artifact@v4
with:
name: ${{ github.event.repository.name }}.${{ github.sha }}.fluxengine.${{ runner.arch }}.pkg
path: fluxengine/FluxEngine.pkg
build-windows:
runs-on: windows-latest
defaults:
run:
shell: msys2 {0}
steps:
- uses: msys2/setup-msys2@v2
- name: setup WSL
run: |
curl -L https://github.com/WhitewaterFoundry/Fedora-Remix-for-WSL/releases/download/41.0.0/Fedora-Remix-for-WSL-SL_41.0.0.0_x64_arm64.msixbundle -o fedora.msixbundle
unzip fedora.msixbundle Fedora-Remix-for-WSL-SL_41.0.0.0_x64.msix
unzip Fedora-Remix-for-WSL-SL_41.0.0.0_x64.msix install.tar.gz
wsl --update
wsl --set-default-version 1
wsl --import fedora fedora install.tar.gz
wsl --set-default fedora
wsl sh -c 'dnf -y install https://github.com/rpmsphere/noarch/raw/master/r/rpmsphere-release-40-1.noarch.rpm'
wsl sh -c 'dnf -y install gcc gcc-c++ protobuf-c-compiler protobuf-devel fmt-devel systemd-devel sqlite-devel wxGTK-devel mingw32-gcc mingw32-gcc-c++ mingw32-zlib-static mingw32-protobuf-static mingw32-sqlite-static mingw32-wxWidgets3-static mingw32-libpng-static mingw32-libjpeg-static mingw32-libtiff-static mingw32-nsis png2ico'
- name: fix line endings
run: |
git config --global core.autocrlf false
git config --global core.eol lf
- uses: actions/checkout@v4
with:
update: true
msystem: MINGW32
install: >-
diffutils
make
mingw-w64-i686-fmt
mingw-w64-i686-gcc
mingw-w64-i686-libusb
mingw-w64-i686-pkg-config
mingw-w64-i686-protobuf
mingw-w64-i686-sqlite3
mingw-w64-i686-wxWidgets
mingw-w64-i686-zlib
mingw-w64-i686-nsis
zip
vim
- uses: actions/checkout@v1
- name: build
run: make -j2
repository: 'davidgiven/fluxengine'
path: 'fluxengine'
- uses: actions/checkout@v4
with:
repository: 'davidgiven/fluxengine-testdata'
path: 'fluxengine-testdata'
- name: run
run: |
wsl sh -c 'make -C fluxengine BUILDTYPE=windows -j$(nproc)'
- name: nsis
run: |
strip fluxengine.exe -o fluxengine-stripped.exe
strip fluxengine-gui.exe -o fluxengine-gui-stripped.exe
makensis -v2 -nocd -dOUTFILE=fluxengine-installer.exe extras/windows-installer.nsi
wsl sh -c 'cd fluxengine && strip fluxengine.exe -o fluxengine-stripped.exe'
wsl sh -c 'cd fluxengine && strip fluxengine-gui.exe -o fluxengine-gui-stripped.exe'
wsl sh -c 'cd fluxengine && makensis -v2 -nocd -dOUTFILE=fluxengine-installer.exe extras/windows-installer.nsi'
- name: zip
run: |
zip -9 fluxengine-windows.zip fluxengine.exe fluxengine-gui.exe upgrade-flux-file.exe brother120tool.exe brother240tool.exe FluxEngine.cydsn/CortexM3/ARM_GCC_541/Release/FluxEngine.hex fluxengine-installer.exe
wsl sh -c 'cd fluxengine && zip -9 fluxengine-windows.zip fluxengine.exe fluxengine-gui.exe upgrade-flux-file.exe brother120tool.exe brother240tool.exe FluxEngine.cydsn/CortexM3/ARM_GCC_541/Release/FluxEngine.hex fluxengine-installer.exe'
- name: Upload build artifacts
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v4
with:
name: ${{ github.event.repository.name }}.${{ github.sha }}
path: fluxengine-windows.zip
name: ${{ github.event.repository.name }}.${{ github.sha }}.windows.zip
path: fluxengine/fluxengine-windows.zip

View File

@@ -1,5 +1,9 @@
name: Autorelease
concurrency:
group: environment-release-${{ github.head_ref }}
cancel-in-progress: true
on:
push:
branches:
@@ -8,43 +12,43 @@ on:
jobs:
dev-release:
runs-on: windows-latest
defaults:
run:
shell: msys2 {0}
steps:
- uses: msys2/setup-msys2@v2
with:
update: true
msystem: MINGW32
install: >-
diffutils
make
mingw-w64-i686-fmt
mingw-w64-i686-gcc
mingw-w64-i686-libusb
mingw-w64-i686-pkg-config
mingw-w64-i686-protobuf
mingw-w64-i686-sqlite3
mingw-w64-i686-wxWidgets
mingw-w64-i686-zlib
mingw-w64-i686-nsis
zip
vim
- uses: actions/checkout@v3
- name: build
steps:
- name: setup WSL
run: |
make -j2
curl -L https://github.com/WhitewaterFoundry/Fedora-Remix-for-WSL/releases/download/41.0.0/Fedora-Remix-for-WSL-SL_41.0.0.0_x64_arm64.msixbundle -o fedora.msixbundle
unzip fedora.msixbundle Fedora-Remix-for-WSL-SL_41.0.0.0_x64.msix
unzip Fedora-Remix-for-WSL-SL_41.0.0.0_x64.msix install.tar.gz
wsl --update
wsl --set-default-version 1
wsl --import fedora fedora install.tar.gz
wsl --set-default fedora
wsl sh -c 'dnf -y install https://github.com/rpmsphere/noarch/raw/master/r/rpmsphere-release-40-1.noarch.rpm'
wsl sh -c 'dnf -y install gcc gcc-c++ protobuf-c-compiler protobuf-devel fmt-devel systemd-devel sqlite-devel wxGTK-devel mingw32-gcc mingw32-gcc-c++ mingw32-zlib-static mingw32-protobuf-static mingw32-sqlite-static mingw32-wxWidgets3-static mingw32-libpng-static mingw32-libjpeg-static mingw32-libtiff-static mingw32-nsis png2ico'
- name: fix line endings
run: |
git config --global core.autocrlf false
git config --global core.eol lf
- uses: actions/checkout@v4
with:
repository: 'davidgiven/fluxengine'
path: 'fluxengine'
- name: run
run: |
wsl sh -c 'cd fluxengine && make BUILDTYPE=windows -j$(nproc)'
- name: nsis
run: |
strip fluxengine.exe -o fluxengine-stripped.exe
strip fluxengine-gui.exe -o fluxengine-gui-stripped.exe
makensis -v2 -nocd -dOUTFILE=fluxengine-installer.exe extras/windows-installer.nsi
wsl sh -c 'cd fluxengine && strip fluxengine.exe -o fluxengine-stripped.exe'
wsl sh -c 'cd fluxengine && strip fluxengine-gui.exe -o fluxengine-gui-stripped.exe'
wsl sh -c 'cd fluxengine && makensis -v2 -nocd -dOUTFILE=fluxengine-installer.exe extras/windows-installer.nsi'
- name: zip
run: |
zip -9 fluxengine.zip fluxengine.exe fluxengine-gui.exe upgrade-flux-file.exe brother120tool.exe brother240tool.exe FluxEngine.cydsn/CortexM3/ARM_GCC_541/Release/FluxEngine.hex
wsl sh -c 'cd fluxengine && zip -9 fluxengine-windows.zip fluxengine.exe fluxengine-gui.exe upgrade-flux-file.exe brother120tool.exe brother240tool.exe FluxEngine.cydsn/CortexM3/ARM_GCC_541/Release/FluxEngine.hex fluxengine-installer.exe'
- name: date
run: |
@@ -55,6 +59,7 @@ jobs:
with:
tag-name: dev
force-branch: false
git-directory: 'fluxengine'
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@@ -73,20 +78,27 @@ jobs:
with:
name: Development build ${{ env.RELEASE_DATE }}
files: |
fluxengine.zip
fluxengine-installer.exe
fluxengine/fluxengine.zip
fluxengine/fluxengine-installer.exe
tag_name: dev
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
build-macos:
runs-on: macos-latest
strategy:
matrix:
runs-on: [macos-13, macos-latest]
runs-on: ${{ matrix.runs-on }}
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v4
- name: brew
run: brew install sqlite pkg-config libusb protobuf wxwidgets fmt make coreutils dylibbundler libjpeg
- name: make
run: gmake
run: |
gmake -j2
mv FluxEngine.pkg FluxEngine-${{ runner.arch }}.pkg
- name: tag
uses: EndBug/latest-tag@latest
@@ -102,7 +114,7 @@ jobs:
token: ${{ github.token }}
tag: dev
assets: |
FluxEngine.pkg
FluxEngine-${{ runner.arch }}.pkg
fail-if-no-assets: false
- name: release
@@ -110,7 +122,7 @@ jobs:
with:
name: Development build ${{ env.RELEASE_DATE }}
files: |
FluxEngine.pkg
FluxEngine-${{ runner.arch }}.pkg
tag_name: dev
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

View File

@@ -1,4 +1,5 @@
.obj
.git
streams
.*\.flux
.*\.img

304  Makefile  View File

@@ -1,257 +1,111 @@
ifeq ($(BUILDTYPE),)
buildtype_Darwin = osx
buildtype_Haiku = haiku
BUILDTYPE := $(buildtype_$(shell uname -s ))
ifeq ($(BUILDTYPE),)
BUILDTYPE := unix
endif
endif
export BUILDTYPE
ifeq ($(BUILDTYPE),windows)
MINGW = i686-w64-mingw32-
CC = $(MINGW)gcc
CXX = $(MINGW)g++ -std=c++20
CFLAGS += -g -O3
CXXFLAGS += \
-fext-numeric-literals \
-Wno-deprecated-enum-float-conversion \
-Wno-deprecated-enum-enum-conversion
LDFLAGS += -static
AR = $(MINGW)ar
PKG_CONFIG = $(MINGW)pkg-config -static
WINDRES = $(MINGW)windres
WX_CONFIG = /usr/i686-w64-mingw32/sys-root/mingw/bin/wx-config-3.0 --static=yes
EXT = .exe
else
CC = gcc
CXX = g++ -std=c++20
CFLAGS = -g -O3 \
-Wno-deprecated-enum-float-conversion \
-Wno-deprecated-enum-enum-conversion
LDFLAGS =
AR = ar
PKG_CONFIG = pkg-config
ifeq ($(BUILDTYPE),osx)
else
LDFLAGS += -pthread -Wl,--no-as-needed
endif
endif
HOSTCC = gcc
HOSTCXX = g++ -std=c++20
HOSTCFLAGS = -g -O3
HOSTLDFLAGS =
REALOBJ = .obj
OBJ = $(REALOBJ)/$(BUILDTYPE)
DESTDIR ?=
PREFIX ?= /usr/local
BINDIR ?= $(PREFIX)/bin
# Special Windows settings.
ifeq ($(OS), Windows_NT)
EXT ?= .exe
MINGWBIN = /mingw32/bin
CCPREFIX = $(MINGWBIN)/
LUA = $(MINGWBIN)/lua
PKG_CONFIG = $(MINGWBIN)/pkg-config
WX_CONFIG = /usr/bin/sh $(MINGWBIN)/wx-config --static=yes
PROTOC = $(MINGWBIN)/protoc
PLATFORM = WINDOWS
WINDRES = windres
LDFLAGS += \
-static
CXXFLAGS += \
-std=c++17 \
-fext-numeric-literals \
-Wno-deprecated-enum-float-conversion \
-Wno-deprecated-enum-enum-conversion
# Required to get the gcc run-time libraries on the path.
# Required to get the gcc run - time libraries on the path.
export PATH := $(PATH):$(MINGWBIN)
EXT ?= .exe
endif
# Special OSX settings.
ifeq ($(shell uname),Darwin)
PLATFORM = OSX
LDFLAGS += \
-framework IOKit \
-framework Foundation
-framework Foundation
endif
# Check the Make version.
.PHONY: all
all: +all README.md
.PHONY: binaries tests
binaries: all
tests: all
README.md: $(OBJ)/scripts/+mkdocindex/mkdocindex$(EXT)
@echo $(PROGRESSINFO)MKDOC $@
@csplit -s -f$(OBJ)/README. README.md '/<!-- FORMATSSTART -->/' '%<!-- FORMATSEND -->%'
@(cat $(OBJ)/README.00 && $< && cat $(OBJ)/README.01) > README.md
ifeq ($(findstring 4.,$(MAKE_VERSION)),)
$(error You need GNU Make 4.x for this (if you're on OSX, use gmake).)
endif
.PHONY: tests
# Normal settings.
clean::
$(hide) rm -rf $(REALOBJ)
OBJDIR ?= .obj
CCPREFIX ?=
LUA ?= lua
CC ?= $(CCPREFIX)gcc
CXX ?= $(CCPREFIX)g++
AR ?= $(CCPREFIX)ar
PKG_CONFIG ?= pkg-config
WX_CONFIG ?= wx-config
PROTOC ?= protoc
CFLAGS ?= -g -O3
CXXFLAGS += -std=c++17
LDFLAGS ?=
PLATFORM ?= UNIX
TESTS ?= yes
EXT ?=
DESTDIR ?=
PREFIX ?= /usr/local
BINDIR ?= $(PREFIX)/bin
include build/ab.mk
CFLAGS += \
-Iarch \
-Ilib \
-I. \
-I$(OBJDIR)/arch \
-I$(OBJDIR)/lib \
-I$(OBJDIR) \
DOCKERFILES = \
debian11 \
debian12 \
fedora40 \
fedora41 \
manjaro
LDFLAGS += \
-lz \
-lfmt
docker-%: tests/docker/Dockerfile.%
docker build -t $* -f $< .
.SUFFIXES:
.DELETE_ON_ERROR:
define nl
endef
use-library = $(eval $(use-library-impl))
define use-library-impl
$1: $(call $3_LIB)
$1: private LDFLAGS += $(call $3_LDFLAGS)
$2: private CFLAGS += $(call $3_CFLAGS)
endef
use-pkgconfig = $(eval $(use-pkgconfig-impl))
define use-pkgconfig-impl
ifneq ($(strip $(shell $(PKG_CONFIG) $3; echo $$?)),0)
$$(error Missing required pkg-config dependency: $3)
endif
$(1): private LDFLAGS += $(shell $(PKG_CONFIG) --libs $(3))
$(2): private CFLAGS += $(shell $(PKG_CONFIG) --cflags $(3))
endef
.PHONY: all binaries tests clean install install-bin
all: binaries tests
PROTOS = \
arch/aeslanier/aeslanier.proto \
arch/agat/agat.proto \
arch/amiga/amiga.proto \
arch/apple2/apple2.proto \
arch/brother/brother.proto \
arch/c64/c64.proto \
arch/f85/f85.proto \
arch/fb100/fb100.proto \
arch/ibm/ibm.proto \
arch/macintosh/macintosh.proto \
arch/micropolis/micropolis.proto \
arch/mx/mx.proto \
arch/northstar/northstar.proto \
arch/rolandd20/rolandd20.proto \
arch/smaky6/smaky6.proto \
arch/tids990/tids990.proto \
arch/victor9k/victor9k.proto \
arch/zilogmcz/zilogmcz.proto \
lib/common.proto \
lib/config.proto \
lib/decoders/decoders.proto \
lib/drive.proto \
lib/encoders/encoders.proto \
lib/fl2.proto \
lib/fluxsink/fluxsink.proto \
lib/fluxsource/fluxsource.proto \
lib/imagereader/imagereader.proto \
lib/imagewriter/imagewriter.proto \
lib/layout.proto \
lib/usb/usb.proto \
lib/vfs/vfs.proto \
tests/testproto.proto \
PROTO_HDRS = $(patsubst %.proto, $(OBJDIR)/%.pb.h, $(PROTOS))
PROTO_SRCS = $(patsubst %.proto, $(OBJDIR)/%.pb.cc, $(PROTOS))
PROTO_OBJS = $(patsubst %.cc, %.o, $(PROTO_SRCS))
PROTO_CFLAGS = $(shell $(PKG_CONFIG) --cflags protobuf)
$(PROTO_SRCS): | $(PROTO_HDRS)
$(PROTO_OBJS): CFLAGS += $(PROTO_CFLAGS)
PROTO_LIB = $(OBJDIR)/libproto.a
$(PROTO_LIB): $(PROTO_OBJS)
PROTO_LDFLAGS = $(shell $(PKG_CONFIG) --libs protobuf) -pthread $(PROTO_LIB)
.PRECIOUS: $(PROTO_HDRS) $(PROTO_SRCS)
include dep/agg/build.mk
include dep/libusbp/build.mk
include dep/stb/build.mk
include dep/emu/build.mk
include dep/fatfs/build.mk
include dep/adflib/build.mk
include dep/hfsutils/build.mk
include scripts/build.mk
include lib/build.mk
include arch/build.mk
include src/build.mk
include src/gui/build.mk
include tools/build.mk
include tests/build.mk
do-encodedecodetest = $(eval $(do-encodedecodetest-impl))
define do-encodedecodetest-impl
tests: $(OBJDIR)/$1$3.flux.encodedecode
$(OBJDIR)/$1$3.flux.encodedecode: scripts/encodedecodetest.sh $(FLUXENGINE_BIN) $2
@mkdir -p $(dir $$@)
@echo ENCODEDECODETEST $1 flux $(FLUXENGINE_BIN) $2 $3
@scripts/encodedecodetest.sh $1 flux $(FLUXENGINE_BIN) $2 $3 > $$@
tests: $(OBJDIR)/$1$3.scp.encodedecode
$(OBJDIR)/$1$3.scp.encodedecode: scripts/encodedecodetest.sh $(FLUXENGINE_BIN) $2
@mkdir -p $(dir $$@)
@echo ENCODEDECODETEST $1 scp $(FLUXENGINE_BIN) $2 $3
@scripts/encodedecodetest.sh $1 scp $(FLUXENGINE_BIN) $2 $3 > $$@
endef
$(call do-encodedecodetest,agat840)
$(call do-encodedecodetest,amiga)
$(call do-encodedecodetest,appleii140)
$(call do-encodedecodetest,atarist360)
$(call do-encodedecodetest,atarist370)
$(call do-encodedecodetest,atarist400)
$(call do-encodedecodetest,atarist410)
$(call do-encodedecodetest,atarist720)
$(call do-encodedecodetest,atarist740)
$(call do-encodedecodetest,atarist800)
$(call do-encodedecodetest,atarist820)
$(call do-encodedecodetest,bk800)
$(call do-encodedecodetest,brother120)
$(call do-encodedecodetest,brother240)
$(call do-encodedecodetest,commodore1541,scripts/commodore1541_test.textpb,--35)
$(call do-encodedecodetest,commodore1541,scripts/commodore1541_test.textpb,--40)
$(call do-encodedecodetest,commodore1581)
$(call do-encodedecodetest,cmd_fd2000)
$(call do-encodedecodetest,hp9121)
$(call do-encodedecodetest,ibm1200)
$(call do-encodedecodetest,ibm1232)
$(call do-encodedecodetest,ibm1440)
$(call do-encodedecodetest,ibm180)
$(call do-encodedecodetest,ibm360)
$(call do-encodedecodetest,ibm720)
$(call do-encodedecodetest,mac400,scripts/mac400_test.textpb)
$(call do-encodedecodetest,mac800,scripts/mac800_test.textpb)
$(call do-encodedecodetest,n88basic)
$(call do-encodedecodetest,rx50)
$(call do-encodedecodetest,tids990)
$(call do-encodedecodetest,victor9k_ss)
$(call do-encodedecodetest,victor9k_ds)
$(OBJDIR)/%.a:
@mkdir -p $(dir $@)
@echo AR $@
@$(AR) rc $@ $^
%.exe:
@mkdir -p $(dir $@)
@echo LINK $@
@$(CXX) -o $@ $^ $(LDFLAGS) $(LDFLAGS)
$(OBJDIR)/%.o: %.cpp
@mkdir -p $(dir $@)
@echo CXX $<
@$(CXX) $(CFLAGS) $(CXXFLAGS) -MMD -MP -MF $(@:.o=.d) -c -o $@ $<
$(OBJDIR)/%.o: %.cc
@mkdir -p $(dir $@)
@echo CXX $<
@$(CXX) $(CFLAGS) $(CXXFLAGS) -MMD -MP -MF $(@:.o=.d) -c -o $@ $<
$(OBJDIR)/%.o: $(OBJDIR)/%.cc
@mkdir -p $(dir $@)
@echo CXX $<
@$(CXX) $(CFLAGS) $(CXXFLAGS) -MMD -MP -MF $(@:.o=.d) -c -o $@ $<
$(OBJDIR)/%.o: %.c
@mkdir -p $(dir $@)
@echo CC $<
@$(CC) $(CFLAGS) $(CFLAGS) -MMD -MP -MF $(@:.o=.d) -c -o $@ $<
$(OBJDIR)/%.pb.h: %.proto
@mkdir -p $(dir $@)
@echo PROTOC $@
@$(PROTOC) -I. --cpp_out=$(OBJDIR) $<
clean:
rm -rf $(OBJDIR)
install: install-bin # install-man install-docs ...
install-bin: fluxengine$(EXT) fluxengine-gui$(EXT) brother120tool$(EXT) brother240tool$(EXT) upgrade-flux-file$(EXT)
install -d "$(DESTDIR)$(BINDIR)"
for target in $^; do \
install $$target "$(DESTDIR)$(BINDIR)/$$target"; \
done
-include $(OBJS:%.o=%.d)
.PHONY: dockertests
dockertests: $(foreach f,$(DOCKERFILES), docker-$(strip $f) .WAIT)

134  README.md  View File

@@ -4,11 +4,8 @@ FluxEngine
(If you're reading this on GitHub, the formatting's a bit messed up. [Try the
version on cowlark.com instead.](http://cowlark.com/fluxengine/))
**Breaking news!** As of 2022-09-09, there's new [filesystem
support](doc/filesystem.md). Read (and sometimes write) files directly from
(and to) your disks, with eight different file systems! It works in the GUI,
too, which is available for Linux (and other Unix clones), Windows and OSX. See
the details below.
**Breaking news!** As of 2024-10-01, the FluxEngine client software works
(to a point) with [Applesauce](doc/applesauce.md) hardware.
<div style="text-align: center">
<a href="doc/screenshot.jpg"><img src="doc/screenshot.jpg" style="width:60%" alt="screenshot of the GUI in action"></a>
@@ -35,12 +32,14 @@ Don't believe me? Watch the demo reel!
</div>
**New!** The FluxEngine client software now works with
[GreaseWeazle](https://github.com/keirf/Greaseweazle/wiki) hardware. So, if you
can't find a PSoC5 development kit, or don't want to use the Cypress Windows
tools for programming it, you can use one of these instead. Very nearly all
FluxEngine features are available with the GreaseWeazle and it works out-of-the
box. See the [dedicated GreaseWeazle documentation page](doc/greaseweazle.md)
for more information.
[Greaseweazle](https://github.com/keirf/Greaseweazle/wiki) and
[Applesauce](https://applesaucefdc.com/) hardware. So, if you can't find a PSoC5
development kit, or don't want to use the Cypress Windows tools for programming
it, you can use one of these instead. Very nearly all FluxEngine features are
available with the Greaseweazle and it works out-of-the box; the Applesauce is a
bit less supported but still works. See the [dedicated Greaseweazle
documentation page](doc/greaseweazle.md) or the [Applesauce
page](doc/applesauce.md) for more information.
Where?
------
@@ -65,7 +64,7 @@ following friendly articles:
- [Using a FluxEngine](doc/using.md) ∾ what to do with your new hardware ∾
flux files and image files ∾ knowing what you're doing
- [Using GreaseWeazle hardware with the FluxEngine client
- [Using Greaseweazle hardware with the FluxEngine client
software](doc/greaseweazle.md) ∾ what works ∾ what doesn't work ∾ where to
go for help
@@ -88,63 +87,64 @@ Which?
The current support state is as follows.
Dinosaurs (🦖) have yet to be observed in real life --- I've written the
decoder based on Kryoflux (or other) dumps I've found. I don't (yet) have
real, physical disks in my hand to test the capture process.
Dinosaurs (🦖) have yet to be observed in real life --- I've written the encoder
and/or decoder based on Kryoflux (or other) dumps I've found. I don't (yet) have
real, physical disks in my hand to test the capture process, or hardware to
verify that written disks work.
Unicorns (🦄) are completely real --- this means that I've read actual,
physical disks with these formats and so know they work (or had reports from
people who've had it work).
Unicorns (🦄) are completely real --- this means that I've read actual, physical
disks with these formats and/or written real, physical disks and then used them
on real hardware, and so know they work (or had reports from people who've had
it work).
### Old disk formats
If a filesystem is listed, this means that FluxEngine natively supports that
particular filesystem and can read (and sometimes write, support varies) files
directly from disks, flux files or disk images. Some formats have multiple
choices because they can store multiple types of file system.
| Format | Read? | Write? | Notes |
|:------------------------------------------|:-----:|:------:|-------|
| [IBM PC compatible](doc/disk-ibm.md) | 🦄 | 🦄 | and compatibles (like the Atari ST) |
| [Atari ST](doc/disk-atarist.md) | 🦄 | 🦄 | technically the same as IBM, almost |
| [Acorn ADFS](doc/disk-acornadfs.md) | 🦄 | 🦖* | single- and double- sided |
| [Acorn DFS](doc/disk-acorndfs.md) | 🦄 | 🦖* | |
| [Ampro Little Board](doc/disk-ampro.md) | 🦖 | 🦖* | |
| [Agat](doc/disk-agat.md) | 🦖 | | Soviet Union Apple-II-like computer |
| [Apple II](doc/disk-apple2.md) | 🦄 | 🦄 | both 140kB and 640kB formats |
| [Amiga](doc/disk-amiga.md) | 🦄 | 🦄 | |
| [Commodore 64 1541/1581](doc/disk-c64.md) | 🦄 | 🦄 | and probably the other formats |
| [Brother 120kB](doc/disk-brother.md) | 🦄 | 🦄 | |
| [Brother 240kB](doc/disk-brother.md) | 🦄 | 🦄 | |
| [Brother FB-100](doc/disk-fb100.md) | 🦖 | | Tandy Model 100, Husky Hunter, knitting machines |
| [Elektronika BK](doc/disk-bd.md) | 🦄 | 🦄 | Soviet Union PDP-11 clone |
| [Macintosh 400kB/800kB](doc/disk-macintosh.md) | 🦄 | 🦄 | |
| [NEC PC-98](doc/disk-ibm.md) | 🦄 | 🦄 | trimode drive not required |
| [pSOS](doc/disk-ibm.md) | 🦄 | 🦖* | pSOS PHILE file system |
| [Sharp X68000](doc/disk-ibm.md) | 🦄 | 🦄 | yet another IBM scheme |
| [Smaky 6](doc/disk-smaky6.md) | 🦖 | | 5.25" hard sectored |
| [TRS-80](doc/disk-trs80.md) | 🦖 | 🦖* | a minor variation of the IBM scheme |
<!-- FORMATSSTART -->
<!-- This section is automatically generated. Do not edit. -->
| Profile | Format | Read? | Write? | Filesystem? |
|:--------|:-------|:-----:|:------:|:------------|
| [`acornadfs`](doc/disk-acornadfs.md) | Acorn ADFS: BBC Micro, Archimedes | 🦖 | | |
| [`acorndfs`](doc/disk-acorndfs.md) | Acorn DFS: Acorn Atom, BBC Micro series | 🦄 | | ACORNDFS |
| [`aeslanier`](doc/disk-aeslanier.md) | AES Lanier "No Problem": 616kB 5.25" 77-track SSDD hard sectored | 🦖 | | |
| [`agat`](doc/disk-agat.md) | Agat: 840kB 5.25" 80-track DS | 🦖 | 🦖 | |
| [`amiga`](doc/disk-amiga.md) | Amiga: 880kB 3.5" DSDD | 🦄 | 🦄 | AMIGAFFS |
| [`ampro`](doc/disk-ampro.md) | Ampro Little Board: CP/M | 🦖 | | CPMFS |
| [`apple2`](doc/disk-apple2.md) | Apple II: Prodos, Appledos, and CP/M | 🦄 | 🦄 | APPLEDOS CPMFS PRODOS |
| [`atarist`](doc/disk-atarist.md) | Atari ST: Almost PC compatible | 🦄 | 🦄 | |
| [`bk`](doc/disk-bk.md) | BK: 800kB 5.25"/3.5" 80-track 10-sector DSDD | 🦖 | 🦖 | |
| [`brother`](doc/disk-brother.md) | Brother word processors: GCR family | 🦄 | 🦄 | BROTHER120 FATFS |
| [`commodore`](doc/disk-commodore.md) | Commodore: 1541, 1581, 8050 and variations | 🦄 | 🦄 | CBMFS |
| [`eco1`](doc/disk-eco1.md) | VDS Eco1: CP/M; 1210kB 77-track mixed format DSHD | 🦖 | | CPMFS |
| [`epsonpf10`](doc/disk-epsonpf10.md) | Epson PF-10: CP/M; 3.5" 40-track DSDD | 🦖 | | CPMFS |
| [`f85`](doc/disk-f85.md) | Durango F85: 461kB 5.25" 77-track SS | 🦖 | | |
| [`fb100`](doc/disk-fb100.md) | Brother FB-100: 100kB 3.5" 40-track SSSD | 🦖 | | |
| [`hplif`](doc/disk-hplif.md) | Hewlett-Packard LIF: a variety of disk formats used by HP | 🦄 | 🦄 | LIF |
| [`ibm`](doc/disk-ibm.md) | IBM PC: Generic PC 3.5"/5.25" disks | 🦄 | 🦄 | FATFS |
| [`icl30`](doc/disk-icl30.md) | ICL Model 30: CP/M; 263kB 35-track DSSD | 🦖 | | CPMFS |
| [`juku`](doc/disk-juku.md) | Juku E5104: CP/M | | | CPMFS |
| [`mac`](doc/disk-mac.md) | Macintosh: 400kB/800kB 3.5" GCR | 🦄 | 🦄 | MACHFS |
| [`micropolis`](doc/disk-micropolis.md) | Micropolis: 100tpi MetaFloppy disks | 🦄 | 🦄 | |
| [`ms2000`](doc/disk-ms2000.md) | : MS2000 Microdisk Development System | | | MICRODOS |
| [`mx`](doc/disk-mx.md) | DVK MX: Soviet-era PDP-11 clone | 🦖 | | |
| [`n88basic`](doc/disk-n88basic.md) | N88-BASIC: PC8800/PC98 5.25" 77-track 26-sector DSHD | 🦄 | 🦄 | |
| [`northstar`](doc/disk-northstar.md) | Northstar: 5.25" hard sectored | 🦄 | 🦄 | |
| [`psos`](doc/disk-psos.md) | pSOS: 800kB DSDD with PHILE | 🦄 | 🦄 | PHILE |
| [`rolandd20`](doc/disk-rolandd20.md) | Roland D20: 3.5" electronic synthesiser disks | 🦄 | 🦖 | ROLAND |
| [`rx50`](doc/disk-rx50.md) | Digital RX50: 400kB 5.25" 80-track 10-sector SSDD | 🦖 | 🦖 | |
| [`smaky6`](doc/disk-smaky6.md) | Smaky 6: 308kB 5.25" 77-track 16-sector SSDD, hard sectored | 🦖 | | SMAKY6 |
| [`tartu`](doc/disk-tartu.md) | Tartu: The Palivere and variations | 🦄 | 🦖 | CPMFS |
| [`ti99`](doc/disk-ti99.md) | TI-99: 90kB 35-track SSSD | 🦖 | | |
| [`tids990`](doc/disk-tids990.md) | Texas Instruments DS990: 1126kB 8" DSSD | 🦖 | 🦖 | |
| [`tiki`](doc/disk-tiki.md) | Tiki 100: CP/M | | | CPMFS |
| [`victor9k`](doc/disk-victor9k.md) | Victor 9000 / Sirius One: 1224kB 5.25" DSDD GCR | 🦖 | 🦖 | |
| [`zilogmcz`](doc/disk-zilogmcz.md) | Zilog MCZ: 320kB 8" 77-track SSSD hard-sectored | 🦖 | | ZDOS |
{: .datatable }
`*`: these formats are variations of the generic IBM format, and since the
IBM writer is completely generic, it should be configurable for these
formats... theoretically. I don't have the hardware to try it.
### Even older disk formats
These formats are for particularly old, weird architectures, even by the
standards of floppy disks. They've largely been implemented from single flux
files with no access to physical hardware. Typically the reads were pretty
bad and I've had to make a number of guesses as to how things work. They do,
at least, check the CRC so what data's there is probably good.
| Format | Read? | Write? | Notes |
|:-----------------------------------------|:-----:|:------:|-------|
| [AES Superplus / No Problem](doc/disk-aeslanier.md) | 🦖 | | hard sectors! |
| [Durango F85](doc/disk-durangof85.md) | 🦖 | | 5.25" |
| [DVK MX](doc/disk-mx.md) | 🦖 | | Soviet PDP-11 clone |
| [VDS Eco1](doc/disk-eco1.md) | 🦖 | | 8" mixed format |
| [Micropolis](doc/disk-micropolis.md) | 🦄 | | Micropolis 100tpi drives |
| [Northstar](doc/disk-northstar.md) | 🦖 | 🦖 | 5.25" hard sectors |
| [TI DS990 FD1000](doc/disk-tids990.md) | 🦄 | 🦄 | 8" |
| [Victor 9000](doc/disk-victor9k.md) | 🦖 | | 5.25" GCR encoded |
| [Zilog MCZ](doc/disk-zilogmcz.md) | 🦖 | | 8" _and_ hard sectors |
{: .datatable }
<!-- FORMATSEND -->
### Notes
@@ -259,9 +259,11 @@ package, written by Robert Leslie et al, taken from
https://www.mars.org/home/rob/proj/hfs. It is GPL 2.0 licensed. Please see the
contents of the directory for the full text.
As an exception, `dep/lexy` contains a partial copy of the lexy package, written
by foonathen@github, taken from https://github.com/foonathan/lexy. It is BSL 1.0
licensed. Please see the contents of the directory for the full text.
__Important:__ Because of all these exceptions, if you distribute the
FluxEngine package as a whole, you must comply with the terms of _all_ of the
licensing terms. This means that __effectively the FluxEngine package is
distributable under the terms of the GPL 2.0__.

View File

@@ -2,9 +2,10 @@
#define AESLANIER_H
#define AESLANIER_RECORD_SEPARATOR 0x55555122
#define AESLANIER_SECTOR_LENGTH 256
#define AESLANIER_RECORD_SIZE (AESLANIER_SECTOR_LENGTH + 5)
#define AESLANIER_SECTOR_LENGTH 256
#define AESLANIER_RECORD_SIZE (AESLANIER_SECTOR_LENGTH + 5)
extern std::unique_ptr<Decoder> createAesLanierDecoder(const DecoderProto& config);
extern std::unique_ptr<Decoder> createAesLanierDecoder(
const DecoderProto& config);
#endif

View File

@@ -1,66 +1,65 @@
#include "globals.h"
#include "decoders/decoders.h"
#include "aeslanier.h"
#include "crc.h"
#include "fluxmap.h"
#include "decoders/fluxmapreader.h"
#include "sector.h"
#include "bytes.h"
#include "lib/core/globals.h"
#include "lib/decoders/decoders.h"
#include "arch/aeslanier/aeslanier.h"
#include "lib/core/crc.h"
#include "lib/data/fluxmap.h"
#include "lib/data/fluxmapreader.h"
#include "lib/data/fluxpattern.h"
#include "lib/data/sector.h"
#include "lib/core/bytes.h"
#include "fmt/format.h"
#include <string.h>
static const FluxPattern SECTOR_PATTERN(32, AESLANIER_RECORD_SEPARATOR);
/* This is actually M2FM, rather than MFM, but our MFM/FM decoder copes fine with it. */
/* This is actually M2FM, rather than MFM, but our MFM/FM decoder copes fine
 * with it. */
class AesLanierDecoder : public Decoder
{
public:
AesLanierDecoder(const DecoderProto& config):
Decoder(config)
{}
AesLanierDecoder(const DecoderProto& config): Decoder(config) {}
nanoseconds_t advanceToNextRecord() override
{
return seekToPattern(SECTOR_PATTERN);
}
{
return seekToPattern(SECTOR_PATTERN);
}
void decodeSectorRecord() override
{
/* Skip ID mark (we know it's an AESLANIER_RECORD_SEPARATOR). */
{
/* Skip ID mark (we know it's an AESLANIER_RECORD_SEPARATOR). */
readRawBits(16);
readRawBits(16);
const auto& rawbits = readRawBits(AESLANIER_RECORD_SIZE*16);
const auto& bytes = decodeFmMfm(rawbits).slice(0, AESLANIER_RECORD_SIZE);
const auto& reversed = bytes.reverseBits();
const auto& rawbits = readRawBits(AESLANIER_RECORD_SIZE * 16);
const auto& bytes =
decodeFmMfm(rawbits).slice(0, AESLANIER_RECORD_SIZE);
const auto& reversed = bytes.reverseBits();
_sector->logicalTrack = reversed[1];
_sector->logicalSide = 0;
_sector->logicalSector = reversed[2];
_sector->logicalTrack = reversed[1];
_sector->logicalSide = 0;
_sector->logicalSector = reversed[2];
/* Check header 'checksum' (which seems far too simple to mean much). */
/* Check header 'checksum' (which seems far too simple to mean much). */
{
uint8_t wanted = reversed[3];
uint8_t got = reversed[1] + reversed[2];
if (wanted != got)
return;
}
{
uint8_t wanted = reversed[3];
uint8_t got = reversed[1] + reversed[2];
if (wanted != got)
return;
}
/* Check data checksum, which also includes the header and is
* significantly better. */
/* Check data checksum, which also includes the header and is
* significantly better. */
_sector->data = reversed.slice(1, AESLANIER_SECTOR_LENGTH);
uint16_t wanted = reversed.reader().seek(0x101).read_le16();
uint16_t got = crc16ref(MODBUS_POLY_REF, _sector->data);
_sector->status = (wanted == got) ? Sector::OK : Sector::BAD_CHECKSUM;
}
_sector->data = reversed.slice(1, AESLANIER_SECTOR_LENGTH);
uint16_t wanted = reversed.reader().seek(0x101).read_le16();
uint16_t got = crc16ref(MODBUS_POLY_REF, _sector->data);
_sector->status = (wanted == got) ? Sector::OK : Sector::BAD_CHECKSUM;
}
};
std::unique_ptr<Decoder> createAesLanierDecoder(const DecoderProto& config)
{
return std::unique_ptr<Decoder>(new AesLanierDecoder(config));
return std::unique_ptr<Decoder>(new AesLanierDecoder(config));
}

View File

@@ -1,22 +1,20 @@
#include "globals.h"
#include "decoders/decoders.h"
#include "agat.h"
#include "bytes.h"
#include "lib/core/globals.h"
#include "lib/decoders/decoders.h"
#include "arch/agat/agat.h"
#include "lib/core/bytes.h"
#include "fmt/format.h"
uint8_t agatChecksum(const Bytes& bytes)
{
uint16_t checksum = 0;
for (uint8_t b : bytes)
{
if (checksum > 0xff)
checksum = (checksum + 1) & 0xff;
for (uint8_t b : bytes)
{
if (checksum > 0xff)
checksum = (checksum + 1) & 0xff;
checksum += b;
}
checksum += b;
}
return checksum & 0xff;
return checksum & 0xff;
}

View File

@@ -17,4 +17,3 @@ extern std::unique_ptr<Encoder> createAgatEncoder(const EncoderProto& config);
extern uint8_t agatChecksum(const Bytes& bytes);
#endif

View File

@@ -1,6 +1,6 @@
syntax = "proto2";
import "lib/common.proto";
import "lib/config/common.proto";
message AgatDecoderProto {}

View File

@@ -1,21 +1,23 @@
#include "globals.h"
#include "decoders/decoders.h"
#include "agat.h"
#include "crc.h"
#include "fluxmap.h"
#include "decoders/fluxmapreader.h"
#include "sector.h"
#include "bytes.h"
#include "lib/core/globals.h"
#include "lib/decoders/decoders.h"
#include "arch/agat/agat.h"
#include "lib/core/crc.h"
#include "lib/data/fluxmap.h"
#include "lib/data/fluxmapreader.h"
#include "lib/data/fluxpattern.h"
#include "lib/data/sector.h"
#include "lib/core/bytes.h"
#include "fmt/format.h"
#include <string.h>
// clang-format off
/*
* data: X X X X X X X X X - - X - X - X - X X - X - X - = 0xff956a
* flux: 01 01 01 01 01 01 01 01 01 00 10 01 00 01 00 01 00 01 01 00 01 00 01 00 = 0x555549111444
*
* data: X X X X X X X X - X X - X - X - X - - X - X - X = 0xff6a95
* flux: 01 01 01 01 01 01 01 01 00 01 01 00 01 00 01 00 01 00 10 01 00 01 00 01 = 0x555514444911
*
*
* Each pattern is prefixed with this one:
*
* data: - - - X - - X - = 0x12
@@ -30,65 +32,59 @@
* 0100010010010010 = MFM encoded
* 1000100100100100 = with trailing zero
* - - - X - - X - = effective bitstream = 0x12
*
*/
// clang-format on
static const FluxPattern SECTOR_PATTERN(64, SECTOR_ID);
static const FluxPattern DATA_PATTERN(64, DATA_ID);
static const FluxMatchers ALL_PATTERNS = {
&SECTOR_PATTERN,
&DATA_PATTERN
};
static const FluxMatchers ALL_PATTERNS = {&SECTOR_PATTERN, &DATA_PATTERN};
class AgatDecoder : public Decoder
{
public:
AgatDecoder(const DecoderProto& config):
Decoder(config)
{}
AgatDecoder(const DecoderProto& config): Decoder(config) {}
nanoseconds_t advanceToNextRecord() override
{
return seekToPattern(ALL_PATTERNS);
}
{
return seekToPattern(ALL_PATTERNS);
}
void decodeSectorRecord() override
{
if (readRaw64() != SECTOR_ID)
return;
{
if (readRaw64() != SECTOR_ID)
return;
auto bytes = decodeFmMfm(readRawBits(64)).slice(0, 4);
if (bytes[3] != 0x5a)
return;
auto bytes = decodeFmMfm(readRawBits(64)).slice(0, 4);
if (bytes[3] != 0x5a)
return;
_sector->logicalTrack = bytes[1] >> 1;
_sector->logicalSector = bytes[2];
_sector->logicalSide = bytes[1] & 1;
_sector->status = Sector::DATA_MISSING; /* unintuitive but correct */
}
_sector->logicalTrack = bytes[1] >> 1;
_sector->logicalSector = bytes[2];
_sector->logicalSide = bytes[1] & 1;
_sector->status = Sector::DATA_MISSING; /* unintuitive but correct */
}
void decodeDataRecord() override
{
if (readRaw64() != DATA_ID)
return;
void decodeDataRecord() override
{
if (readRaw64() != DATA_ID)
return;
Bytes bytes = decodeFmMfm(readRawBits((AGAT_SECTOR_SIZE+2)*16)).slice(0, AGAT_SECTOR_SIZE+2);
Bytes bytes = decodeFmMfm(readRawBits((AGAT_SECTOR_SIZE + 2) * 16))
.slice(0, AGAT_SECTOR_SIZE + 2);
if (bytes[AGAT_SECTOR_SIZE+1] != 0x5a)
return;
if (bytes[AGAT_SECTOR_SIZE + 1] != 0x5a)
return;
_sector->data = bytes.slice(0, AGAT_SECTOR_SIZE);
uint8_t wantChecksum = bytes[AGAT_SECTOR_SIZE];
uint8_t gotChecksum = agatChecksum(_sector->data);
_sector->status = (wantChecksum == gotChecksum) ? Sector::OK : Sector::BAD_CHECKSUM;
}
_sector->data = bytes.slice(0, AGAT_SECTOR_SIZE);
uint8_t wantChecksum = bytes[AGAT_SECTOR_SIZE];
uint8_t gotChecksum = agatChecksum(_sector->data);
_sector->status =
(wantChecksum == gotChecksum) ? Sector::OK : Sector::BAD_CHECKSUM;
}
};
std::unique_ptr<Decoder> createAgatDecoder(const DecoderProto& config)
{
return std::unique_ptr<Decoder>(new AgatDecoder(config));
return std::unique_ptr<Decoder>(new AgatDecoder(config));
}
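The pattern tables in the comment above can be cross-checked mechanically. Below is a minimal standalone sketch (not FluxEngine code; mfmEncode is a hypothetical helper written for this illustration) that applies the usual MFM rule, where a clock bit is inserted only between two zero data bits, and reproduces the data 0xff956a -> flux 0x555549111444 line from the comment:
#include <cstdint>
#include <cstdio>
/* Hypothetical helper, not part of FluxEngine: MFM-encode the low `nbits`
 * bits of `data`, most significant bit first. Each data bit becomes two
 * channel bits: a clock bit that is 1 only when the previous and current
 * data bits are both 0, followed by the data bit itself. */
static uint64_t mfmEncode(uint32_t data, int nbits, bool prev = true)
{
    uint64_t out = 0;
    for (int i = nbits - 1; i >= 0; i--)
    {
        bool bit = (data >> i) & 1;
        bool clock = !prev && !bit;
        out = (out << 2) | (clock ? 2 : 0) | (bit ? 1 : 0);
        prev = bit;
    }
    return out;
}
int main()
{
    /* Prints 555549111444, matching the worked example in the comment. */
    printf("%012llx\n", (unsigned long long)mfmEncode(0xff956a, 24));
    return 0;
}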

View File

@@ -1,11 +1,11 @@
#include "lib/globals.h"
#include "lib/core/globals.h"
#include "lib/core/utils.h"
#include "lib/decoders/decoders.h"
#include "lib/encoders/encoders.h"
#include "agat.h"
#include "lib/crc.h"
#include "lib/readerwriter.h"
#include "lib/image.h"
#include "lib/layout.h"
#include "arch/agat/agat.h"
#include "lib/core/crc.h"
#include "lib/data/image.h"
#include "lib/data/layout.h"
#include "arch/agat/agat.pb.h"
#include "lib/encoders/encoders.pb.h"
@@ -95,7 +95,7 @@ public:
}
if (_cursor >= _bits.size())
Error() << "track data overrun";
error("track data overrun");
fillBitmapTo(_bits, _cursor, _bits.size(), {true, false});
auto fluxmap = std::make_unique<Fluxmap>();

View File

@@ -1,7 +1,7 @@
#include "globals.h"
#include "decoders/decoders.h"
#include "amiga.h"
#include "bytes.h"
#include "lib/core/globals.h"
#include "lib/decoders/decoders.h"
#include "arch/amiga/amiga.h"
#include "lib/core/bytes.h"
#include "fmt/format.h"
uint32_t amigaChecksum(const Bytes& bytes)
@@ -18,61 +18,61 @@ uint32_t amigaChecksum(const Bytes& bytes)
static uint8_t everyother(uint16_t x)
{
/* aabb ccdd eeff gghh */
x &= 0x6666; /* 0ab0 0cd0 0ef0 0gh0 */
x >>= 1; /* 00ab 00cd 00ef 00gh */
x |= x << 2; /* abab cdcd efef ghgh */
x &= 0x3c3c; /* 00ab cd00 00ef gh00 */
x >>= 2; /* 0000 abcd 0000 efgh */
x |= x >> 4; /* 0000 abcd abcd efgh */
return x;
/* aabb ccdd eeff gghh */
x &= 0x6666; /* 0ab0 0cd0 0ef0 0gh0 */
x >>= 1; /* 00ab 00cd 00ef 00gh */
x |= x << 2; /* abab cdcd efef ghgh */
x &= 0x3c3c; /* 00ab cd00 00ef gh00 */
x >>= 2; /* 0000 abcd 0000 efgh */
x |= x >> 4; /* 0000 abcd abcd efgh */
return x;
}
Bytes amigaInterleave(const Bytes& input)
{
Bytes output;
ByteWriter bw(output);
Bytes output;
ByteWriter bw(output);
/* Write all odd bits. (Numbering starts at 0...) */
/* Write all odd bits. (Numbering starts at 0...) */
{
ByteReader br(input);
while (!br.eof())
{
uint16_t x = br.read_be16();
x &= 0xaaaa; /* a0b0 c0d0 e0f0 g0h0 */
x |= x >> 1; /* aabb ccdd eeff gghh */
x = everyother(x); /* 0000 0000 abcd efgh */
bw.write_8(x);
}
}
{
ByteReader br(input);
while (!br.eof())
{
uint16_t x = br.read_be16();
x &= 0xaaaa; /* a0b0 c0d0 e0f0 g0h0 */
x |= x >> 1; /* aabb ccdd eeff gghh */
x = everyother(x); /* 0000 0000 abcd efgh */
bw.write_8(x);
}
}
/* Write all even bits. */
/* Write all even bits. */
{
ByteReader br(input);
while (!br.eof())
{
uint16_t x = br.read_be16();
x &= 0x5555; /* 0a0b 0c0d 0e0f 0g0h */
x |= x << 1; /* aabb ccdd eeff gghh */
x = everyother(x); /* 0000 0000 abcd efgh */
bw.write_8(x);
}
}
{
ByteReader br(input);
while (!br.eof())
{
uint16_t x = br.read_be16();
x &= 0x5555; /* 0a0b 0c0d 0e0f 0g0h */
x |= x << 1; /* aabb ccdd eeff gghh */
x = everyother(x); /* 0000 0000 abcd efgh */
bw.write_8(x);
}
}
return output;
return output;
}
Bytes amigaDeinterleave(const uint8_t*& input, size_t len)
{
assert(!(len & 1));
const uint8_t* odds = &input[0];
const uint8_t* evens = &input[len/2];
const uint8_t* evens = &input[len / 2];
Bytes output;
ByteWriter bw(output);
for (size_t i=0; i<len/2; i++)
for (size_t i = 0; i < len / 2; i++)
{
uint8_t o = *odds++;
uint8_t e = *evens++;
@@ -81,11 +81,15 @@ Bytes amigaDeinterleave(const uint8_t*& input, size_t len)
* http://graphics.stanford.edu/~seander/bithacks.html#InterleaveBMN
*/
uint16_t result =
(((e * 0x0101010101010101ULL & 0x8040201008040201ULL)
* 0x0102040810204081ULL >> 49) & 0x5555) |
(((o * 0x0101010101010101ULL & 0x8040201008040201ULL)
* 0x0102040810204081ULL >> 48) & 0xAAAA);
(((e * 0x0101010101010101ULL & 0x8040201008040201ULL) *
0x0102040810204081ULL >>
49) &
0x5555) |
(((o * 0x0101010101010101ULL & 0x8040201008040201ULL) *
0x0102040810204081ULL >>
48) &
0xAAAA);
bw.write_be16(result);
}
@@ -95,6 +99,6 @@ Bytes amigaDeinterleave(const uint8_t*& input, size_t len)
Bytes amigaDeinterleave(const Bytes& input)
{
const uint8_t* ptr = input.cbegin();
return amigaDeinterleave(ptr, input.size());
const uint8_t* ptr = input.cbegin();
return amigaDeinterleave(ptr, input.size());
}
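To make the bit-twiddling above easier to follow, here is a self-contained sketch (plain standard C++ with invented names, rather than FluxEngine's Bytes API) that performs the same odd/even split on a single 16-bit word and then recombines it with the multiply-based interleave from the bit hacks page cited in the code:
#include <cassert>
#include <cstdint>
#include <cstdio>
/* Same trick as everyother() above: pack the eight duplicated bit pairs of a
 * 16-bit word down into one byte. */
static uint8_t everyOther(uint16_t x)
{
    x &= 0x6666;  /* 0ab0 0cd0 0ef0 0gh0 */
    x >>= 1;      /* 00ab 00cd 00ef 00gh */
    x |= x << 2;  /* abab cdcd efef ghgh */
    x &= 0x3c3c;  /* 00ab cd00 00ef gh00 */
    x >>= 2;      /* 0000 abcd 0000 efgh */
    x |= x >> 4;  /* 0000 abcd abcd efgh */
    return x & 0xff;
}
int main()
{
    uint16_t word = 0xbeef;
    uint16_t o = word & 0xaaaa; /* odd-numbered bits, as in amigaInterleave() */
    o |= o >> 1;
    uint8_t odds = everyOther(o);
    uint16_t e = word & 0x5555; /* even-numbered bits */
    e |= e << 1;
    uint8_t evens = everyOther(e);
    /* Recombine with the 64-bit multiply interleave used by
     * amigaDeinterleave() above. */
    uint16_t back =
        (((evens * 0x0101010101010101ULL & 0x8040201008040201ULL) *
             0x0102040810204081ULL >> 49) & 0x5555) |
        (((odds * 0x0101010101010101ULL & 0x8040201008040201ULL) *
             0x0102040810204081ULL >> 48) & 0xAAAA);
    assert(back == word);
    printf("odds=%02x evens=%02x roundtrip=%04x\n", odds, evens, back);
    return 0;
}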

View File

@@ -1,7 +1,7 @@
#ifndef AMIGA_H
#define AMIGA_H
#include "encoders/encoders.h"
#include "lib/encoders/encoders.h"
#define AMIGA_SECTOR_RECORD 0xaaaa44894489LL

View File

@@ -1,6 +1,6 @@
syntax = "proto2";
import "lib/common.proto";
import "lib/config/common.proto";
message AmigaDecoderProto {}

View File

@@ -1,80 +1,85 @@
#include "globals.h"
#include "fluxmap.h"
#include "decoders/fluxmapreader.h"
#include "lib/core/globals.h"
#include "lib/data/fluxmap.h"
#include "lib/data/fluxmapreader.h"
#include "lib/data/fluxpattern.h"
#include "protocol.h"
#include "decoders/decoders.h"
#include "sector.h"
#include "amiga.h"
#include "bytes.h"
#include "lib/decoders/decoders.h"
#include "lib/data/sector.h"
#include "arch/amiga/amiga.h"
#include "lib/core/bytes.h"
#include "fmt/format.h"
#include "lib/decoders/decoders.pb.h"
#include <string.h>
#include <algorithm>
/*
/*
* Amiga disks use MFM but it's not quite the same as IBM MFM. They only use
* a single type of record with a different marker byte.
*
*
* See the big comment in the IBM MFM decoder for the gruesome details of how
* MFM works.
*/
static const FluxPattern SECTOR_PATTERN(48, AMIGA_SECTOR_RECORD);
class AmigaDecoder : public Decoder
{
public:
AmigaDecoder(const DecoderProto& config):
Decoder(config),
_config(config.amiga())
{}
AmigaDecoder(const DecoderProto& config):
Decoder(config),
_config(config.amiga())
{
}
nanoseconds_t advanceToNextRecord() override
{
return seekToPattern(SECTOR_PATTERN);
}
{
return seekToPattern(SECTOR_PATTERN);
}
void decodeSectorRecord() override
{
if (readRaw48() != AMIGA_SECTOR_RECORD)
return;
const auto& rawbits = readRawBits(AMIGA_RECORD_SIZE*16);
if (rawbits.size() < (AMIGA_RECORD_SIZE*16))
return;
const auto& rawbytes = toBytes(rawbits).slice(0, AMIGA_RECORD_SIZE*2);
const auto& bytes = decodeFmMfm(rawbits).slice(0, AMIGA_RECORD_SIZE);
{
if (readRaw48() != AMIGA_SECTOR_RECORD)
return;
const uint8_t* ptr = bytes.begin();
const auto& rawbits = readRawBits(AMIGA_RECORD_SIZE * 16);
if (rawbits.size() < (AMIGA_RECORD_SIZE * 16))
return;
const auto& rawbytes = toBytes(rawbits).slice(0, AMIGA_RECORD_SIZE * 2);
const auto& bytes = decodeFmMfm(rawbits).slice(0, AMIGA_RECORD_SIZE);
Bytes header = amigaDeinterleave(ptr, 4);
Bytes recoveryinfo = amigaDeinterleave(ptr, 16);
const uint8_t* ptr = bytes.begin();
_sector->logicalTrack = header[1] >> 1;
_sector->logicalSide = header[1] & 1;
_sector->logicalSector = header[2];
Bytes header = amigaDeinterleave(ptr, 4);
Bytes recoveryinfo = amigaDeinterleave(ptr, 16);
uint32_t wantedheaderchecksum = amigaDeinterleave(ptr, 4).reader().read_be32();
uint32_t gotheaderchecksum = amigaChecksum(rawbytes.slice(0, 40));
if (gotheaderchecksum != wantedheaderchecksum)
return;
_sector->logicalTrack = header[1] >> 1;
_sector->logicalSide = header[1] & 1;
_sector->logicalSector = header[2];
uint32_t wanteddatachecksum = amigaDeinterleave(ptr, 4).reader().read_be32();
uint32_t gotdatachecksum = amigaChecksum(rawbytes.slice(56, 1024));
uint32_t wantedheaderchecksum =
amigaDeinterleave(ptr, 4).reader().read_be32();
uint32_t gotheaderchecksum = amigaChecksum(rawbytes.slice(0, 40));
if (gotheaderchecksum != wantedheaderchecksum)
return;
Bytes data;
data.writer().append(amigaDeinterleave(ptr, 512)).append(recoveryinfo);
_sector->data = data;
_sector->status = (gotdatachecksum == wanteddatachecksum) ? Sector::OK : Sector::BAD_CHECKSUM;
}
uint32_t wanteddatachecksum =
amigaDeinterleave(ptr, 4).reader().read_be32();
uint32_t gotdatachecksum = amigaChecksum(rawbytes.slice(56, 1024));
Bytes data;
data.writer().append(amigaDeinterleave(ptr, 512)).append(recoveryinfo);
_sector->data = data;
_sector->status = (gotdatachecksum == wanteddatachecksum)
? Sector::OK
: Sector::BAD_CHECKSUM;
}
private:
const AmigaDecoderProto& _config;
nanoseconds_t _clock;
const AmigaDecoderProto& _config;
nanoseconds_t _clock;
};
std::unique_ptr<Decoder> createAmigaDecoder(const DecoderProto& config)
{
return std::unique_ptr<Decoder>(new AmigaDecoder(config));
return std::unique_ptr<Decoder>(new AmigaDecoder(config));
}

View File

@@ -1,10 +1,10 @@
#include "globals.h"
#include "decoders/decoders.h"
#include "encoders/encoders.h"
#include "amiga.h"
#include "crc.h"
#include "readerwriter.h"
#include "image.h"
#include "lib/core/globals.h"
#include "lib/core/utils.h"
#include "lib/decoders/decoders.h"
#include "lib/encoders/encoders.h"
#include "arch/amiga/amiga.h"
#include "lib/core/crc.h"
#include "lib/data/image.h"
#include "arch/amiga/amiga.pb.h"
#include "lib/encoders/encoders.pb.h"
@@ -59,7 +59,7 @@ static void write_sector(std::vector<bool>& bits,
const std::shared_ptr<const Sector>& sector)
{
if ((sector->data.size() != 512) && (sector->data.size() != 528))
Error() << "unsupported sector size --- you must pick 512 or 528";
error("unsupported sector size --- you must pick 512 or 528");
uint32_t checksum = 0;
@@ -114,7 +114,8 @@ public:
const std::vector<std::shared_ptr<const Sector>>& sectors,
const Image& image) override
{
/* Number of bits for one nominal revolution of a real 200ms Amiga disk. */
/* Number of bits for one nominal revolution of a real 200ms Amiga disk.
*/
int bitsPerRevolution = 200e3 / _config.clock_rate_us();
std::vector<bool> bits(bitsPerRevolution);
unsigned cursor = 0;
@@ -129,13 +130,12 @@ public:
write_sector(bits, cursor, sector);
if (cursor >= bits.size())
Error() << "track data overrun";
error("track data overrun");
fillBitmapTo(bits, cursor, bits.size(), {true, false});
auto fluxmap = std::make_unique<Fluxmap>();
fluxmap->appendBits(bits,
calculatePhysicalClockPeriod(
_config.clock_rate_us() * 1e3, 200e6));
calculatePhysicalClockPeriod(_config.clock_rate_us() * 1e3, 200e6));
return fluxmap;
}

View File

@@ -2,19 +2,18 @@
#define APPLE2_H
#include <memory.h>
#include "decoders/decoders.h"
#include "encoders/encoders.h"
#include "lib/decoders/decoders.h"
#include "lib/encoders/encoders.h"
#define APPLE2_SECTOR_RECORD 0xd5aa96
#define APPLE2_DATA_RECORD 0xd5aaad
#define APPLE2_SECTOR_RECORD 0xd5aa96
#define APPLE2_DATA_RECORD 0xd5aaad
#define APPLE2_SECTOR_LENGTH 256
#define APPLE2_SECTOR_LENGTH 256
#define APPLE2_ENCODED_SECTOR_LENGTH 342
#define APPLE2_SECTORS 16
#define APPLE2_SECTORS 16
extern std::unique_ptr<Decoder> createApple2Decoder(const DecoderProto& config);
extern std::unique_ptr<Encoder> createApple2Encoder(const EncoderProto& config);
#endif

View File

@@ -1,6 +1,6 @@
syntax = "proto2";
import "lib/common.proto";
import "lib/config/common.proto";
message Apple2DecoderProto {
optional uint32 side_one_track_offset = 1

View File

@@ -1,13 +1,14 @@
#include "globals.h"
#include "fluxmap.h"
#include "decoders/fluxmapreader.h"
#include "lib/core/globals.h"
#include "lib/data/fluxmap.h"
#include "lib/data/fluxmapreader.h"
#include "lib/data/fluxpattern.h"
#include "protocol.h"
#include "decoders/decoders.h"
#include "sector.h"
#include "apple2.h"
#include "lib/decoders/decoders.h"
#include "lib/data/sector.h"
#include "arch/apple2/apple2.h"
#include "arch/apple2/apple2.pb.h"
#include "lib/decoders/decoders.pb.h"
#include "bytes.h"
#include "lib/core/bytes.h"
#include "fmt/format.h"
#include <string.h>
#include <algorithm>

View File

@@ -1,14 +1,14 @@
#include "globals.h"
#include "lib/core/globals.h"
#include "lib/core/utils.h"
#include "arch/apple2/apple2.h"
#include "decoders/decoders.h"
#include "encoders/encoders.h"
#include "sector.h"
#include "readerwriter.h"
#include "image.h"
#include "lib/decoders/decoders.h"
#include "lib/encoders/encoders.h"
#include "lib/data/sector.h"
#include "lib/data/image.h"
#include "fmt/format.h"
#include "lib/encoders/encoders.pb.h"
#include <ctype.h>
#include "bytes.h"
#include "lib/core/bytes.h"
static int encode_data_gcr(uint8_t data)
{
@@ -50,8 +50,7 @@ public:
writeSector(bits, cursor, *sector);
if (cursor >= bits.size())
Error() << fmt::format(
"track data overrun by {} bits", cursor - bits.size());
error("track data overrun by {} bits", cursor - bits.size());
fillBitmapTo(bits, cursor, bits.size(), {true, false});
std::unique_ptr<Fluxmap> fluxmap(new Fluxmap);
@@ -118,8 +117,7 @@ private:
// There is data to encode to disk.
if ((sector.data.size() != APPLE2_SECTOR_LENGTH))
Error() << fmt::format(
"unsupported sector size {} --- you must pick 256",
error("unsupported sector size {} --- you must pick 256",
sector.data.size());
// Write address syncing leader : A sequence of "FF40"s; 5 of them

97  arch/arch.cc Normal file  View File

@@ -0,0 +1,97 @@
#include "lib/core/globals.h"
#include "lib/encoders/encoders.h"
#include "lib/decoders/decoders.h"
#include "lib/config/config.h"
#include "arch/agat/agat.h"
#include "arch/aeslanier/aeslanier.h"
#include "arch/amiga/amiga.h"
#include "arch/apple2/apple2.h"
#include "arch/brother/brother.h"
#include "arch/c64/c64.h"
#include "arch/f85/f85.h"
#include "arch/fb100/fb100.h"
#include "arch/ibm/ibm.h"
#include "arch/macintosh/macintosh.h"
#include "arch/micropolis/micropolis.h"
#include "arch/mx/mx.h"
#include "arch/northstar/northstar.h"
#include "arch/rolandd20/rolandd20.h"
#include "arch/smaky6/smaky6.h"
#include "arch/tartu/tartu.h"
#include "arch/tids990/tids990.h"
#include "arch/victor9k/victor9k.h"
#include "arch/zilogmcz/zilogmcz.h"
#include "arch/arch.h"
std::unique_ptr<Encoder> Arch::createEncoder(Config& config)
{
if (!config.hasEncoder())
error("no encoder configured");
return createEncoder(config->encoder());
}
std::unique_ptr<Encoder> Arch::createEncoder(const EncoderProto& config)
{
static const std::map<int,
std::function<std::unique_ptr<Encoder>(const EncoderProto&)>>
encoders = {
{EncoderProto::kAgat, createAgatEncoder },
{EncoderProto::kAmiga, createAmigaEncoder },
{EncoderProto::kApple2, createApple2Encoder },
{EncoderProto::kBrother, createBrotherEncoder },
{EncoderProto::kC64, createCommodore64Encoder},
{EncoderProto::kIbm, createIbmEncoder },
{EncoderProto::kMacintosh, createMacintoshEncoder },
{EncoderProto::kMicropolis, createMicropolisEncoder },
{EncoderProto::kNorthstar, createNorthstarEncoder },
{EncoderProto::kTartu, createTartuEncoder },
{EncoderProto::kTids990, createTids990Encoder },
{EncoderProto::kVictor9K, createVictor9kEncoder },
};
auto encoder = encoders.find(config.format_case());
if (encoder == encoders.end())
error("no encoder specified");
return (encoder->second)(config);
}
std::unique_ptr<Decoder> Arch::createDecoder(Config& config)
{
if (!config.hasDecoder())
error("no decoder configured");
return createDecoder(config->decoder());
}
std::unique_ptr<Decoder> Arch::createDecoder(const DecoderProto& config)
{
static const std::map<int,
std::function<std::unique_ptr<Decoder>(const DecoderProto&)>>
decoders = {
{DecoderProto::kAgat, createAgatDecoder },
{DecoderProto::kAeslanier, createAesLanierDecoder },
{DecoderProto::kAmiga, createAmigaDecoder },
{DecoderProto::kApple2, createApple2Decoder },
{DecoderProto::kBrother, createBrotherDecoder },
{DecoderProto::kC64, createCommodore64Decoder},
{DecoderProto::kF85, createDurangoF85Decoder },
{DecoderProto::kFb100, createFb100Decoder },
{DecoderProto::kIbm, createIbmDecoder },
{DecoderProto::kMacintosh, createMacintoshDecoder },
{DecoderProto::kMicropolis, createMicropolisDecoder },
{DecoderProto::kMx, createMxDecoder },
{DecoderProto::kNorthstar, createNorthstarDecoder },
{DecoderProto::kRolandd20, createRolandD20Decoder },
{DecoderProto::kSmaky6, createSmaky6Decoder },
{DecoderProto::kTartu, createTartuDecoder },
{DecoderProto::kTids990, createTids990Decoder },
{DecoderProto::kVictor9K, createVictor9kDecoder },
{DecoderProto::kZilogmcz, createZilogMczDecoder },
};
auto decoder = decoders.find(config.format_case());
if (decoder == decoders.end())
error("no decoder specified");
return (decoder->second)(config);
}
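arch/arch.cc above centralises decoder and encoder construction behind a static map from the protobuf format_case() value to a factory function. A stripped-down sketch of the same dispatch pattern, using invented placeholder types instead of the real protos, looks like this:
#include <functional>
#include <map>
#include <memory>
#include <stdexcept>
/* Placeholder types for illustration only; the real code keys the map on
 * DecoderProto::format_case() and returns FluxEngine Decoder subclasses. */
struct Decoder
{
    virtual ~Decoder() = default;
};
struct FooDecoder : Decoder
{
};
enum FormatCase
{
    kNotSet = 0,
    kFoo = 1
};
static std::unique_ptr<Decoder> createDecoder(FormatCase format)
{
    static const std::map<int, std::function<std::unique_ptr<Decoder>()>>
        decoders = {
            {kFoo, [] { return std::make_unique<FooDecoder>(); }},
    };
    auto it = decoders.find(format);
    if (it == decoders.end())
        throw std::runtime_error("no decoder specified");
    return (it->second)();
}
int main()
{
    auto decoder = createDecoder(kFoo);
    return decoder ? 0 : 1;
}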

16  arch/arch.h Normal file  View File

@@ -0,0 +1,16 @@
#pragma once
class Encoder;
class Decoder;
class DecoderProto;
class EncoderProto;
class Config;
namespace Arch
{
std::unique_ptr<Decoder> createDecoder(Config& config);
std::unique_ptr<Decoder> createDecoder(const DecoderProto& config);
std::unique_ptr<Encoder> createEncoder(Config& config);
std::unique_ptr<Encoder> createEncoder(const EncoderProto& config);
}

View File

@@ -3,17 +3,19 @@
/* Brother word processor format (or at least, one of them) */
#define BROTHER_SECTOR_RECORD 0xFFFFFD57
#define BROTHER_DATA_RECORD 0xFFFFFDDB
#define BROTHER_DATA_RECORD_PAYLOAD 256
#define BROTHER_DATA_RECORD_CHECKSUM 3
#define BROTHER_SECTOR_RECORD 0xFFFFFD57
#define BROTHER_DATA_RECORD 0xFFFFFDDB
#define BROTHER_DATA_RECORD_PAYLOAD 256
#define BROTHER_DATA_RECORD_CHECKSUM 3
#define BROTHER_DATA_RECORD_ENCODED_SIZE 415
#define BROTHER_TRACKS_PER_240KB_DISK 78
#define BROTHER_TRACKS_PER_120KB_DISK 39
#define BROTHER_SECTORS_PER_TRACK 12
#define BROTHER_TRACKS_PER_240KB_DISK 78
#define BROTHER_TRACKS_PER_120KB_DISK 39
#define BROTHER_SECTORS_PER_TRACK 12
extern std::unique_ptr<Decoder> createBrotherDecoder(const DecoderProto& config);
extern std::unique_ptr<Encoder> createBrotherEncoder(const EncoderProto& config);
extern std::unique_ptr<Decoder> createBrotherDecoder(
const DecoderProto& config);
extern std::unique_ptr<Encoder> createBrotherEncoder(
const EncoderProto& config);
#endif

View File

@@ -1,13 +1,13 @@
GCR_ENTRY(0x55, 0) // 00000
GCR_ENTRY(0x57, 1) // 00001
GCR_ENTRY(0x5b, 2) // 00010
GCR_ENTRY(0x5d, 3) // 00011
GCR_ENTRY(0x5f, 4) // 00100
GCR_ENTRY(0x6b, 5) // 00101
GCR_ENTRY(0x6d, 6) // 00110
GCR_ENTRY(0x6f, 7) // 00111
GCR_ENTRY(0x75, 8) // 01000
GCR_ENTRY(0x77, 9) // 01001
GCR_ENTRY(0x55, 0) // 00000
GCR_ENTRY(0x57, 1) // 00001
GCR_ENTRY(0x5b, 2) // 00010
GCR_ENTRY(0x5d, 3) // 00011
GCR_ENTRY(0x5f, 4) // 00100
GCR_ENTRY(0x6b, 5) // 00101
GCR_ENTRY(0x6d, 6) // 00110
GCR_ENTRY(0x6f, 7) // 00111
GCR_ENTRY(0x75, 8) // 01000
GCR_ENTRY(0x77, 9) // 01001
GCR_ENTRY(0x7b, 10) // 01010
GCR_ENTRY(0x7d, 11) // 01011
GCR_ENTRY(0x7f, 12) // 01100
@@ -30,4 +30,3 @@ GCR_ENTRY(0xef, 28) // 11100
GCR_ENTRY(0xf5, 29) // 11101
GCR_ENTRY(0xf7, 30) // 11110
GCR_ENTRY(0xfb, 31) // 11111

View File

@@ -1,17 +1,19 @@
#include "globals.h"
#include "fluxmap.h"
#include "decoders/fluxmapreader.h"
#include "decoders/decoders.h"
#include "encoders/encoders.h"
#include "brother.h"
#include "sector.h"
#include "bytes.h"
#include "crc.h"
#include "lib/core/globals.h"
#include "lib/data/fluxmap.h"
#include "lib/data/fluxmapreader.h"
#include "lib/data/fluxpattern.h"
#include "lib/decoders/decoders.h"
#include "lib/encoders/encoders.h"
#include "arch/brother/brother.h"
#include "lib/data/sector.h"
#include "lib/core/bytes.h"
#include "lib/core/crc.h"
#include <ctype.h>
const FluxPattern SECTOR_RECORD_PATTERN(32, BROTHER_SECTOR_RECORD);
const FluxPattern DATA_RECORD_PATTERN(32, BROTHER_DATA_RECORD);
const FluxMatchers ANY_RECORD_PATTERN({ &SECTOR_RECORD_PATTERN, &DATA_RECORD_PATTERN });
const FluxMatchers ANY_RECORD_PATTERN(
{&SECTOR_RECORD_PATTERN, &DATA_RECORD_PATTERN});
static std::vector<uint8_t> outputbuffer;
@@ -32,88 +34,89 @@ static int decode_data_gcr(uint8_t gcr)
{
switch (gcr)
{
#define GCR_ENTRY(gcr, data) \
case gcr: return data;
#include "data_gcr.h"
#undef GCR_ENTRY
#define GCR_ENTRY(gcr, data) \
case gcr: \
return data;
#include "data_gcr.h"
#undef GCR_ENTRY
}
return -1;
}
static int decode_header_gcr(uint16_t word)
{
switch (word)
{
#define GCR_ENTRY(gcr, data) \
case gcr: return data;
#include "header_gcr.h"
#undef GCR_ENTRY
}
return -1;
switch (word)
{
#define GCR_ENTRY(gcr, data) \
case gcr: \
return data;
#include "header_gcr.h"
#undef GCR_ENTRY
}
return -1;
}
class BrotherDecoder : public Decoder
{
public:
BrotherDecoder(const DecoderProto& config):
Decoder(config)
{}
BrotherDecoder(const DecoderProto& config): Decoder(config) {}
nanoseconds_t advanceToNextRecord() override
{
return seekToPattern(ANY_RECORD_PATTERN);
}
{
return seekToPattern(ANY_RECORD_PATTERN);
}
void decodeSectorRecord() override
{
if (readRaw32() != BROTHER_SECTOR_RECORD)
return;
{
if (readRaw32() != BROTHER_SECTOR_RECORD)
return;
const auto& rawbits = readRawBits(32);
const auto& bytes = toBytes(rawbits).slice(0, 4);
const auto& rawbits = readRawBits(32);
const auto& bytes = toBytes(rawbits).slice(0, 4);
ByteReader br(bytes);
_sector->logicalTrack = decode_header_gcr(br.read_be16());
_sector->logicalSector = decode_header_gcr(br.read_be16());
ByteReader br(bytes);
_sector->logicalTrack = decode_header_gcr(br.read_be16());
_sector->logicalSector = decode_header_gcr(br.read_be16());
/* Sanity check the values read; there's no header checksum and
* occasionally we get garbage due to bit errors. */
if (_sector->logicalSector > 11)
return;
if (_sector->logicalTrack > 79)
return;
/* Sanity check the values read; there's no header checksum and
* occasionally we get garbage due to bit errors. */
if (_sector->logicalSector > 11)
return;
if (_sector->logicalTrack > 79)
return;
_sector->status = Sector::DATA_MISSING;
}
_sector->status = Sector::DATA_MISSING;
}
void decodeDataRecord() override
{
if (readRaw32() != BROTHER_DATA_RECORD)
return;
{
if (readRaw32() != BROTHER_DATA_RECORD)
return;
const auto& rawbits = readRawBits(BROTHER_DATA_RECORD_ENCODED_SIZE*8);
const auto& rawbytes = toBytes(rawbits).slice(0, BROTHER_DATA_RECORD_ENCODED_SIZE);
const auto& rawbits = readRawBits(BROTHER_DATA_RECORD_ENCODED_SIZE * 8);
const auto& rawbytes =
toBytes(rawbits).slice(0, BROTHER_DATA_RECORD_ENCODED_SIZE);
Bytes bytes;
ByteWriter bw(bytes);
BitWriter bitw(bw);
for (uint8_t b : rawbytes)
{
uint32_t nibble = decode_data_gcr(b);
bitw.push(nibble, 5);
}
bitw.flush();
Bytes bytes;
ByteWriter bw(bytes);
BitWriter bitw(bw);
for (uint8_t b : rawbytes)
{
uint32_t nibble = decode_data_gcr(b);
bitw.push(nibble, 5);
}
bitw.flush();
_sector->data = bytes.slice(0, BROTHER_DATA_RECORD_PAYLOAD);
uint32_t realCrc = crcbrother(_sector->data);
uint32_t wantCrc = bytes.reader().seek(BROTHER_DATA_RECORD_PAYLOAD).read_be24();
_sector->status = (realCrc == wantCrc) ? Sector::OK : Sector::BAD_CHECKSUM;
}
_sector->data = bytes.slice(0, BROTHER_DATA_RECORD_PAYLOAD);
uint32_t realCrc = crcbrother(_sector->data);
uint32_t wantCrc =
bytes.reader().seek(BROTHER_DATA_RECORD_PAYLOAD).read_be24();
_sector->status =
(realCrc == wantCrc) ? Sector::OK : Sector::BAD_CHECKSUM;
}
};
std::unique_ptr<Decoder> createBrotherDecoder(const DecoderProto& config)
{
return std::unique_ptr<Decoder>(new BrotherDecoder(config));
return std::unique_ptr<Decoder>(new BrotherDecoder(config));
}

View File

@@ -1,10 +1,10 @@
#include "globals.h"
#include "decoders/decoders.h"
#include "encoders/encoders.h"
#include "brother.h"
#include "crc.h"
#include "readerwriter.h"
#include "image.h"
#include "lib/core/globals.h"
#include "lib/core/utils.h"
#include "lib/decoders/decoders.h"
#include "lib/encoders/encoders.h"
#include "arch/brother/brother.h"
#include "lib/core/crc.h"
#include "lib/data/image.h"
#include "arch/brother/brother.pb.h"
#include "lib/encoders/encoders.pb.h"
@@ -67,7 +67,7 @@ static void write_sector_data(
int width = 0;
if (data.size() != BROTHER_DATA_RECORD_PAYLOAD)
Error() << "unsupported sector size";
error("unsupported sector size");
auto write_byte = [&](uint8_t byte)
{
@@ -107,8 +107,7 @@ public:
}
public:
std::unique_ptr<Fluxmap> encode(
std::shared_ptr<const TrackInfo>& trackInfo,
std::unique_ptr<Fluxmap> encode(std::shared_ptr<const TrackInfo>& trackInfo,
const std::vector<std::shared_ptr<const Sector>>& sectors,
const Image& image) override
{
@@ -116,8 +115,8 @@ public:
std::vector<bool> bits(bitsPerRevolution);
unsigned cursor = 0;
int sectorCount = 0;
for (const auto& sectorData : sectors)
int sectorCount = 0;
for (const auto& sectorData : sectors)
{
double headerMs = _config.post_index_gap_ms() +
sectorCount * _config.sector_spacing_ms();
@@ -126,16 +125,18 @@ public:
unsigned dataCursor = dataMs * 1e3 / _config.clock_rate_us();
fillBitmapTo(bits, cursor, headerCursor, {true, false});
write_sector_header(
bits, cursor, sectorData->logicalTrack, sectorData->logicalSector);
write_sector_header(bits,
cursor,
sectorData->logicalTrack,
sectorData->logicalSector);
fillBitmapTo(bits, cursor, dataCursor, {true, false});
write_sector_data(bits, cursor, sectorData->data);
sectorCount++;
sectorCount++;
}
if (cursor >= bits.size())
Error() << "track data overrun";
error("track data overrun");
fillBitmapTo(bits, cursor, bits.size(), {true, false});
std::unique_ptr<Fluxmap> fluxmap(new Fluxmap);
@@ -147,8 +148,7 @@ private:
const BrotherEncoderProto& _config;
};
std::unique_ptr<Encoder> createBrotherEncoder(
const EncoderProto& config)
std::unique_ptr<Encoder> createBrotherEncoder(const EncoderProto& config)
{
return std::unique_ptr<Encoder>(new BrotherEncoder(config));
}

View File

@@ -76,4 +76,3 @@ GCR_ENTRY(0x6BAB, 74)
GCR_ENTRY(0xAD5F, 75)
GCR_ENTRY(0xDBED, 76)
GCR_ENTRY(0x55BB, 77)

View File

@@ -1,44 +0,0 @@
LIBARCH_SRCS = \
arch/aeslanier/decoder.cc \
arch/agat/agat.cc \
arch/agat/decoder.cc \
arch/agat/encoder.cc \
arch/amiga/amiga.cc \
arch/amiga/decoder.cc \
arch/amiga/encoder.cc \
arch/apple2/decoder.cc \
arch/apple2/encoder.cc \
arch/brother/decoder.cc \
arch/brother/encoder.cc \
arch/c64/c64.cc \
arch/c64/decoder.cc \
arch/c64/encoder.cc \
arch/f85/decoder.cc \
arch/fb100/decoder.cc \
arch/ibm/decoder.cc \
arch/ibm/encoder.cc \
arch/macintosh/decoder.cc \
arch/macintosh/encoder.cc \
arch/micropolis/decoder.cc \
arch/micropolis/encoder.cc \
arch/mx/decoder.cc \
arch/northstar/decoder.cc \
arch/northstar/encoder.cc \
arch/rolandd20/decoder.cc \
arch/smaky6/decoder.cc \
arch/tids990/decoder.cc \
arch/tids990/encoder.cc \
arch/victor9k/decoder.cc \
arch/victor9k/encoder.cc \
arch/zilogmcz/decoder.cc \
LIBARCH_OBJS = $(patsubst %.cc, $(OBJDIR)/%.o, $(LIBARCH_SRCS))
OBJS += $(LIBARCH_OBJS)
$(LIBARCH_SRCS): | $(PROTO_HDRS)
$(LIBARCH_SRCS): CFLAGS += $(PROTO_CFLAGS)
LIBARCH_LIB = $(OBJDIR)/libarch.a
$(LIBARCH_LIB): $(LIBARCH_OBJS)
LIBARCH_LDFLAGS = $(LIBARCH_LIB)
$(call use-pkgconfig, $(LIBARCH_LIB), $(LIBARCH_OBJS), fmt)

arch/build.py (new file, 61 lines)
View File

@@ -0,0 +1,61 @@
from build.c import cxxlibrary
from build.protobuf import proto, protocc, protolib
from os.path import *
from glob import glob
import sys
archs = [f for f in glob("*", root_dir="arch") if isfile(f"arch/{f}/{f}.proto")]
ps = []
pls = []
cls = []
for a in archs:
ps += [
proto(
name=f"proto_{a}",
srcs=[f"arch/{a}/{a}.proto"],
deps=["lib/config+common_proto"],
)
]
pls += [
protocc(
name=f"proto_lib_{a}",
srcs=[f".+proto_{a}"],
deps=["lib/config+common_proto_lib"],
)
]
cls += [
cxxlibrary(
name=f"arch_{a}",
srcs=glob(f"arch/{a}/*.cc") + glob(f"arch/{a}/*.h"),
hdrs={f"arch/{a}/{a}.h": f"arch/{a}/{a}.h"},
deps=[
"lib/core",
"lib/data",
"lib/config",
"lib/encoders",
"lib/decoders",
],
)
]
protolib(
name="proto",
srcs=ps + ["lib/config+common_proto"],
)
cxxlibrary(name="proto_lib", deps=pls)
cxxlibrary(
name="arch",
srcs=[
"./arch.cc",
],
hdrs={
"arch/arch.h": "./arch.h",
},
deps=cls
+ ["lib/core", "lib/data", "lib/config", "lib/encoders", "lib/decoders"],
)

View File

@@ -1,28 +1,28 @@
#include "globals.h"
#include "c64.h"
#include "lib/core/globals.h"
#include "arch/c64/c64.h"
/*
* Track Sectors/track # Sectors Storage in Bytes Clock rate
* ----- ------------- --------- ---------------- ----------
* 1-17 21 357 7820 3.25
* 18-24 19 133 7170 3.5
* 25-30 18 108 6300 3.75
* 31-40(*) 17 85 6020 4
* ---
* 683 (for a 35 track image)
*
* The clock rate is normalised for a 200ms drive.
*/
* Track Sectors/track # Sectors Storage in Bytes Clock rate
* ----- ------------- --------- ---------------- ----------
* 1-17 21 357 7820 3.25
* 18-24 19 133 7170 3.5
* 25-30 18 108 6300 3.75
* 31-40(*) 17 85 6020 4
* ---
* 683 (for a 35 track image)
*
* The clock rate is normalised for a 200ms drive.
*/
nanoseconds_t clockPeriodForC64Track(unsigned track)
{
constexpr double BYTE_SIZE = 8.0;
constexpr double b = 8.0;
if (track < 17)
return 26.0 / BYTE_SIZE;
return 26.0 / b;
if (track < 24)
return 28.0 / BYTE_SIZE;
return 28.0 / b;
if (track < 30)
return 30.0 / BYTE_SIZE;
return 32.0 / BYTE_SIZE;
return 30.0 / b;
return 32.0 / b;
}
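
The quotients in clockPeriodForC64Track() reproduce the "Clock rate" column of the zone table above: 26/8 = 3.25, 28/8 = 3.5, 30/8 = 3.75 and 32/8 = 4. The comparisons suggest a zero-based track number, so tracks 0-16 map onto the table's 1-17. A small sketch of the same arithmetic, assuming the unit scaling onto nanoseconds_t happens at the call site:

#include <cstdio>

static double clockPeriodForTrack(unsigned track) /* zero-based track number */
{
    constexpr double b = 8.0;
    if (track < 17)
        return 26.0 / b; /* zone 1: tracks 1-17, 21 sectors/track */
    if (track < 24)
        return 28.0 / b; /* zone 2: tracks 18-24, 19 sectors/track */
    if (track < 30)
        return 30.0 / b; /* zone 3: tracks 25-30, 18 sectors/track */
    return 32.0 / b;     /* zone 4: tracks 31-40, 17 sectors/track */
}

int main()
{
    /* Prints 3.25, 3.50, 3.75 and 4.00, one per speed zone. */
    for (unsigned track : {0u, 17u, 24u, 30u})
        std::printf("track %2u -> %.2f\n", track, clockPeriodForTrack(track));
}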

View File

@@ -1,14 +1,14 @@
#ifndef C64_H
#define C64_H
#include "decoders/decoders.h"
#include "encoders/encoders.h"
#include "lib/decoders/decoders.h"
#include "lib/encoders/encoders.h"
#define C64_SECTOR_RECORD 0xffd49
#define C64_DATA_RECORD 0xffd57
#define C64_SECTOR_LENGTH 256
#define C64_SECTOR_RECORD 0xffd49
#define C64_DATA_RECORD 0xffd57
#define C64_SECTOR_LENGTH 256
/* Source: http://www.unusedino.de/ec64/technical/formats/g64.html
/* Source: http://www.unusedino.de/ec64/technical/formats/g64.html
1. Header sync FF FF FF FF FF (40 'on' bits, not GCR)
2. Header info 52 54 B5 29 4B 7A 5E 95 55 55 (10 GCR bytes)
3. Header gap 55 55 55 55 55 55 55 55 55 (9 bytes, never read)
@@ -17,18 +17,20 @@
6. Inter-sector gap 55 55 55 55...55 55 (4 to 12 bytes, never read)
1. Header sync (SYNC for the next sector)
*/
#define C64_HEADER_DATA_SYNC 0xFF
#define C64_HEADER_BLOCK_ID 0x08
#define C64_DATA_BLOCK_ID 0x07
#define C64_HEADER_GAP 0x55
#define C64_INTER_SECTOR_GAP 0x55
#define C64_PADDING 0x0F
#define C64_HEADER_DATA_SYNC 0xFF
#define C64_HEADER_BLOCK_ID 0x08
#define C64_DATA_BLOCK_ID 0x07
#define C64_HEADER_GAP 0x55
#define C64_INTER_SECTOR_GAP 0x55
#define C64_PADDING 0x0F
#define C64_TRACKS_PER_DISK 40
#define C64_BAM_TRACK 17
#define C64_TRACKS_PER_DISK 40
#define C64_BAM_TRACK 17
extern std::unique_ptr<Decoder> createCommodore64Decoder(const DecoderProto& config);
extern std::unique_ptr<Encoder> createCommodore64Encoder(const EncoderProto& config);
extern std::unique_ptr<Decoder> createCommodore64Decoder(
const DecoderProto& config);
extern std::unique_ptr<Encoder> createCommodore64Encoder(
const EncoderProto& config);
extern nanoseconds_t clockPeriodForC64Track(unsigned track);

View File

@@ -1,6 +1,6 @@
syntax = "proto2";
import "lib/common.proto";
import "lib/config/common.proto";
message Commodore64DecoderProto {}

View File

@@ -1,12 +1,13 @@
#include "globals.h"
#include "fluxmap.h"
#include "decoders/fluxmapreader.h"
#include "lib/core/globals.h"
#include "lib/data/fluxmap.h"
#include "lib/data/fluxmapreader.h"
#include "lib/data/fluxpattern.h"
#include "protocol.h"
#include "decoders/decoders.h"
#include "sector.h"
#include "c64.h"
#include "crc.h"
#include "bytes.h"
#include "lib/decoders/decoders.h"
#include "lib/data/sector.h"
#include "arch/c64/c64.h"
#include "lib/core/crc.h"
#include "lib/core/bytes.h"
#include "fmt/format.h"
#include <string.h>
#include <algorithm>
@@ -96,8 +97,7 @@ public:
}
};
std::unique_ptr<Decoder> createCommodore64Decoder(
const DecoderProto& config)
std::unique_ptr<Decoder> createCommodore64Decoder(const DecoderProto& config)
{
return std::unique_ptr<Decoder>(new Commodore64Decoder(config));
}

View File

@@ -1,17 +1,17 @@
#include "globals.h"
#include "decoders/decoders.h"
#include "encoders/encoders.h"
#include "c64.h"
#include "crc.h"
#include "sector.h"
#include "readerwriter.h"
#include "image.h"
#include "lib/core/globals.h"
#include "lib/core/utils.h"
#include "lib/decoders/decoders.h"
#include "lib/encoders/encoders.h"
#include "arch/c64/c64.h"
#include "lib/core/crc.h"
#include "lib/data/sector.h"
#include "lib/data/image.h"
#include "fmt/format.h"
#include "arch/c64/c64.pb.h"
#include "lib/encoders/encoders.pb.h"
#include "lib/layout.h"
#include "lib/data/layout.h"
#include <ctype.h>
#include "bytes.h"
#include "lib/core/bytes.h"
static bool lastBit;
@@ -51,26 +51,6 @@ static void write_bits(
}
}
void bindump(std::ostream& stream, std::vector<bool>& buffer)
{
size_t pos = 0;
while ((pos < buffer.size()) and (pos < 520))
{
stream << fmt::format("{:5d} : ", pos);
for (int i = 0; i < 40; i++)
{
if ((pos + i) < buffer.size())
stream << fmt::format("{:01b}", (buffer[pos + i]));
else
stream << "-- ";
if ((((pos + i + 1) % 8) == 0) and i != 0)
stream << " ";
}
stream << std::endl;
pos += 40;
}
}
static std::vector<bool> encode_data(uint8_t input)
{
/*
@@ -214,8 +194,7 @@ public:
writeSector(bits, cursor, sector);
if (cursor >= bits.size())
Error() << fmt::format(
"track data overrun by {} bits", cursor - bits.size());
error("track data overrun by {} bits", cursor - bits.size());
fillBitmapTo(bits, cursor, bits.size(), {true, false});
std::unique_ptr<Fluxmap> fluxmap(new Fluxmap);
@@ -243,8 +222,7 @@ private:
{
// There is data to encode to disk.
if ((sector->data.size() != C64_SECTOR_LENGTH))
Error() << fmt::format(
"unsupported sector size {} --- you must pick 256",
error("unsupported sector size {} --- you must pick 256",
sector->data.size());
// 1. Write header Sync (not GCR)

View File

@@ -1,28 +1,31 @@
#include "globals.h"
#include "fluxmap.h"
#include "decoders/fluxmapreader.h"
#include "lib/core/globals.h"
#include "lib/data/fluxmap.h"
#include "lib/data/fluxmapreader.h"
#include "lib/data/fluxpattern.h"
#include "protocol.h"
#include "decoders/decoders.h"
#include "sector.h"
#include "f85.h"
#include "crc.h"
#include "bytes.h"
#include "lib/decoders/decoders.h"
#include "lib/data/sector.h"
#include "arch/f85/f85.h"
#include "lib/core/crc.h"
#include "lib/core/bytes.h"
#include "fmt/format.h"
#include <string.h>
#include <algorithm>
const FluxPattern SECTOR_RECORD_PATTERN(24, F85_SECTOR_RECORD);
const FluxPattern DATA_RECORD_PATTERN(24, F85_DATA_RECORD);
const FluxMatchers ANY_RECORD_PATTERN({ &SECTOR_RECORD_PATTERN, &DATA_RECORD_PATTERN });
const FluxMatchers ANY_RECORD_PATTERN(
{&SECTOR_RECORD_PATTERN, &DATA_RECORD_PATTERN});
static int decode_data_gcr(uint8_t gcr)
{
switch (gcr)
{
#define GCR_ENTRY(gcr, data) \
case gcr: return data;
#include "data_gcr.h"
#undef GCR_ENTRY
#define GCR_ENTRY(gcr, data) \
case gcr: \
return data;
#include "data_gcr.h"
#undef GCR_ENTRY
}
return -1;
}
@@ -37,11 +40,11 @@ static Bytes decode(const std::vector<bool>& bits)
while (ii != bits.end())
{
uint8_t inputfifo = 0;
for (size_t i=0; i<5; i++)
for (size_t i = 0; i < 5; i++)
{
if (ii == bits.end())
break;
inputfifo = (inputfifo<<1) | *ii++;
inputfifo = (inputfifo << 1) | *ii++;
}
bitw.push(decode_data_gcr(inputfifo), 4);
@@ -54,56 +57,55 @@ static Bytes decode(const std::vector<bool>& bits)
class DurangoF85Decoder : public Decoder
{
public:
DurangoF85Decoder(const DecoderProto& config):
Decoder(config)
{}
DurangoF85Decoder(const DecoderProto& config): Decoder(config) {}
nanoseconds_t advanceToNextRecord() override
{
return seekToPattern(ANY_RECORD_PATTERN);
}
{
return seekToPattern(ANY_RECORD_PATTERN);
}
void decodeSectorRecord() override
{
/* Skip sync bits and ID byte. */
{
/* Skip sync bits and ID byte. */
if (readRaw24() != F85_SECTOR_RECORD)
return;
if (readRaw24() != F85_SECTOR_RECORD)
return;
/* Read header. */
/* Read header. */
const auto& bytes = decode(readRawBits(6*10));
const auto& bytes = decode(readRawBits(6 * 10));
_sector->logicalSector = bytes[2];
_sector->logicalSide = 0;
_sector->logicalTrack = bytes[0];
_sector->logicalSector = bytes[2];
_sector->logicalSide = 0;
_sector->logicalTrack = bytes[0];
uint16_t wantChecksum = bytes.reader().seek(4).read_be16();
uint16_t gotChecksum = crc16(CCITT_POLY, 0xef21, bytes.slice(0, 4));
if (wantChecksum == gotChecksum)
_sector->status = Sector::DATA_MISSING; /* unintuitive but correct */
}
uint16_t wantChecksum = bytes.reader().seek(4).read_be16();
uint16_t gotChecksum = crc16(CCITT_POLY, 0xef21, bytes.slice(0, 4));
if (wantChecksum == gotChecksum)
_sector->status =
Sector::DATA_MISSING; /* unintuitive but correct */
}
void decodeDataRecord() override
{
/* Skip sync bits and ID byte. */
{
/* Skip sync bits and ID byte. */
if (readRaw24() != F85_DATA_RECORD)
return;
if (readRaw24() != F85_DATA_RECORD)
return;
const auto& bytes = decode(readRawBits((F85_SECTOR_LENGTH+3)*10))
.slice(0, F85_SECTOR_LENGTH+3);
ByteReader br(bytes);
const auto& bytes = decode(readRawBits((F85_SECTOR_LENGTH + 3) * 10))
.slice(0, F85_SECTOR_LENGTH + 3);
ByteReader br(bytes);
_sector->data = br.read(F85_SECTOR_LENGTH);
uint16_t wantChecksum = br.read_be16();
uint16_t gotChecksum = crc16(CCITT_POLY, 0xbf84, _sector->data);
_sector->status = (wantChecksum == gotChecksum) ? Sector::OK : Sector::BAD_CHECKSUM;
}
_sector->data = br.read(F85_SECTOR_LENGTH);
uint16_t wantChecksum = br.read_be16();
uint16_t gotChecksum = crc16(CCITT_POLY, 0xbf84, _sector->data);
_sector->status =
(wantChecksum == gotChecksum) ? Sector::OK : Sector::BAD_CHECKSUM;
}
};
std::unique_ptr<Decoder> createDurangoF85Decoder(const DecoderProto& config)
{
return std::unique_ptr<Decoder>(new DurangoF85Decoder(config));
return std::unique_ptr<Decoder>(new DurangoF85Decoder(config));
}

View File

@@ -2,9 +2,10 @@
#define F85_H
#define F85_SECTOR_RECORD 0xffffce /* 1111 1111 1111 1111 1100 1110 */
#define F85_DATA_RECORD 0xffffcb /* 1111 1111 1111 1111 1100 1101 */
#define F85_SECTOR_LENGTH 512
#define F85_DATA_RECORD 0xffffcb /* 1111 1111 1111 1111 1100 1101 */
#define F85_SECTOR_LENGTH 512
extern std::unique_ptr<Decoder> createDurangoF85Decoder(const DecoderProto& config);
extern std::unique_ptr<Decoder> createDurangoF85Decoder(
const DecoderProto& config);
#endif

View File

@@ -1,23 +1,24 @@
#include "globals.h"
#include "fluxmap.h"
#include "decoders/fluxmapreader.h"
#include "lib/core/globals.h"
#include "lib/data/fluxmap.h"
#include "lib/data/fluxmapreader.h"
#include "lib/data/fluxpattern.h"
#include "protocol.h"
#include "decoders/decoders.h"
#include "sector.h"
#include "fb100.h"
#include "crc.h"
#include "bytes.h"
#include "decoders/rawbits.h"
#include "lib/decoders/decoders.h"
#include "lib/data/sector.h"
#include "arch/fb100/fb100.h"
#include "lib/core/crc.h"
#include "lib/core/bytes.h"
#include "lib/decoders/rawbits.h"
#include "fmt/format.h"
#include <string.h>
#include <algorithm>
const FluxPattern SECTOR_ID_PATTERN(16, 0xabaa);
/*
/*
* Reverse engineered from a dump of the floppy drive's ROM. I have no idea how
* it works.
*
*
* LF8BA:
* clra
* staa X00B0
@@ -100,45 +101,43 @@ static uint16_t checksum(const Bytes& bytes)
class Fb100Decoder : public Decoder
{
public:
Fb100Decoder(const DecoderProto& config):
Decoder(config)
{}
Fb100Decoder(const DecoderProto& config): Decoder(config) {}
nanoseconds_t advanceToNextRecord() override
{
return seekToPattern(SECTOR_ID_PATTERN);
}
{
return seekToPattern(SECTOR_ID_PATTERN);
}
void decodeSectorRecord() override
{
auto rawbits = readRawBits(FB100_RECORD_SIZE*16);
{
auto rawbits = readRawBits(FB100_RECORD_SIZE * 16);
const Bytes bytes = decodeFmMfm(rawbits).slice(0, FB100_RECORD_SIZE);
ByteReader br(bytes);
br.seek(1);
const Bytes id = br.read(FB100_ID_SIZE);
uint16_t wantIdCrc = br.read_be16();
uint16_t gotIdCrc = checksum(id);
const Bytes payload = br.read(FB100_PAYLOAD_SIZE);
uint16_t wantPayloadCrc = br.read_be16();
uint16_t gotPayloadCrc = checksum(payload);
const Bytes bytes = decodeFmMfm(rawbits).slice(0, FB100_RECORD_SIZE);
ByteReader br(bytes);
br.seek(1);
const Bytes id = br.read(FB100_ID_SIZE);
uint16_t wantIdCrc = br.read_be16();
uint16_t gotIdCrc = checksum(id);
const Bytes payload = br.read(FB100_PAYLOAD_SIZE);
uint16_t wantPayloadCrc = br.read_be16();
uint16_t gotPayloadCrc = checksum(payload);
if (wantIdCrc != gotIdCrc)
return;
if (wantIdCrc != gotIdCrc)
return;
uint8_t abssector = id[2];
_sector->logicalTrack = abssector >> 1;
_sector->logicalSide = 0;
_sector->logicalSector = abssector & 1;
_sector->data.writer().append(id.slice(5, 12)).append(payload);
uint8_t abssector = id[2];
_sector->logicalTrack = abssector >> 1;
_sector->logicalSide = 0;
_sector->logicalSector = abssector & 1;
_sector->data.writer().append(id.slice(5, 12)).append(payload);
_sector->status = (wantPayloadCrc == gotPayloadCrc) ? Sector::OK : Sector::BAD_CHECKSUM;
}
_sector->status = (wantPayloadCrc == gotPayloadCrc)
? Sector::OK
: Sector::BAD_CHECKSUM;
}
};
std::unique_ptr<Decoder> createFb100Decoder(const DecoderProto& config)
{
return std::unique_ptr<Decoder>(new Fb100Decoder(config));
return std::unique_ptr<Decoder>(new Fb100Decoder(config));
}

View File

@@ -8,4 +8,3 @@
extern std::unique_ptr<Decoder> createFb100Decoder(const DecoderProto& config);
#endif

View File

@@ -1,13 +1,14 @@
#include "globals.h"
#include "decoders/decoders.h"
#include "ibm.h"
#include "crc.h"
#include "fluxmap.h"
#include "decoders/fluxmapreader.h"
#include "sector.h"
#include "lib/core/globals.h"
#include "lib/decoders/decoders.h"
#include "arch/ibm/ibm.h"
#include "lib/core/crc.h"
#include "lib/data/fluxmap.h"
#include "lib/data/fluxmapreader.h"
#include "lib/data/fluxpattern.h"
#include "lib/data/sector.h"
#include "arch/ibm/ibm.pb.h"
#include "proto.h"
#include "lib/layout.h"
#include "lib/config/proto.h"
#include "lib/data/layout.h"
#include <string.h>
static_assert(std::is_trivially_copyable<IbmIdam>::value,

View File

@@ -1,15 +1,15 @@
#include "globals.h"
#include "decoders/decoders.h"
#include "encoders/encoders.h"
#include "ibm.h"
#include "crc.h"
#include "readerwriter.h"
#include "image.h"
#include "lib/core/globals.h"
#include "lib/config/config.h"
#include "lib/decoders/decoders.h"
#include "lib/encoders/encoders.h"
#include "arch/ibm/ibm.h"
#include "lib/core/crc.h"
#include "lib/data/image.h"
#include "arch/ibm/ibm.pb.h"
#include "lib/encoders/encoders.pb.h"
#include "fmt/format.h"
#include "lib/proto.h"
#include "lib/layout.h"
#include "lib/config/proto.h"
#include "lib/data/layout.h"
#include <ctype.h>
/* IAM record separator:
@@ -112,10 +112,11 @@ public:
const Image& image) override
{
IbmEncoderProto::TrackdataProto trackdata;
getEncoderTrackData(trackdata, trackInfo->logicalTrack, trackInfo->logicalSide);
getEncoderTrackData(
trackdata, trackInfo->logicalTrack, trackInfo->logicalSide);
auto trackLayout =
Layout::getLayoutOfTrack(trackInfo->logicalTrack, trackInfo->logicalSide);
auto trackLayout = Layout::getLayoutOfTrack(
trackInfo->logicalTrack, trackInfo->logicalSide);
auto writeBytes = [&](const Bytes& bytes)
{
@@ -257,7 +258,7 @@ public:
}
if (_cursor >= _bits.size())
Error() << "track data overrun";
error("track data overrun");
while (_cursor < _bits.size())
writeFillerRawBytes(1, gapFill);

View File

@@ -31,9 +31,7 @@ class Decoder;
class DecoderProto;
class EncoderProto;
extern std::unique_ptr<Decoder> createIbmDecoder(
const DecoderProto& config);
extern std::unique_ptr<Encoder> createIbmEncoder(
const EncoderProto& config);
extern std::unique_ptr<Decoder> createIbmDecoder(const DecoderProto& config);
extern std::unique_ptr<Encoder> createIbmEncoder(const EncoderProto& config);
#endif

View File

@@ -1,6 +1,6 @@
syntax = "proto2";
import "lib/common.proto";
import "lib/config/common.proto";
message IbmDecoderProto {
// Next: 11

View File

@@ -1,33 +1,37 @@
#include "globals.h"
#include "fluxmap.h"
#include "decoders/fluxmapreader.h"
#include "lib/core/globals.h"
#include "lib/data/fluxmap.h"
#include "lib/data/fluxmapreader.h"
#include "lib/data/fluxpattern.h"
#include "protocol.h"
#include "decoders/decoders.h"
#include "sector.h"
#include "macintosh.h"
#include "bytes.h"
#include "lib/decoders/decoders.h"
#include "lib/data/sector.h"
#include "arch/macintosh/macintosh.h"
#include "lib/core/bytes.h"
#include "fmt/format.h"
#include <string.h>
#include <algorithm>
const FluxPattern SECTOR_RECORD_PATTERN(24, MAC_SECTOR_RECORD);
const FluxPattern DATA_RECORD_PATTERN(24, MAC_DATA_RECORD);
const FluxMatchers ANY_RECORD_PATTERN({ &SECTOR_RECORD_PATTERN, &DATA_RECORD_PATTERN });
const FluxMatchers ANY_RECORD_PATTERN(
{&SECTOR_RECORD_PATTERN, &DATA_RECORD_PATTERN});
static int decode_data_gcr(uint8_t gcr)
{
switch (gcr)
{
#define GCR_ENTRY(gcr, data) \
case gcr: return data;
#include "data_gcr.h"
#undef GCR_ENTRY
#define GCR_ENTRY(gcr, data) \
case gcr: \
return data;
#include "data_gcr.h"
#undef GCR_ENTRY
}
return -1;
}
/* This is extremely inspired by the MESS implementation, written by Nathan Woods
* and R. Belmont: https://github.com/mamedev/mame/blob/4263a71e64377db11392c458b580c5ae83556bc7/src/lib/formats/ap_dsk35.cpp
/* This is extremely inspired by the MESS implementation, written by Nathan
* Woods and R. Belmont:
* https://github.com/mamedev/mame/blob/4263a71e64377db11392c458b580c5ae83556bc7/src/lib/formats/ap_dsk35.cpp
*/
static Bytes decode_crazy_data(const Bytes& input, Sector::Status& status)
{
@@ -41,7 +45,7 @@ static Bytes decode_crazy_data(const Bytes& input, Sector::Status& status)
uint8_t b2[LOOKUP_LEN + 1];
uint8_t b3[LOOKUP_LEN + 1];
for (int i=0; i<=LOOKUP_LEN; i++)
for (int i = 0; i <= LOOKUP_LEN; i++)
{
uint8_t w4 = br.read_8();
uint8_t w1 = br.read_8();
@@ -125,67 +129,68 @@ uint8_t decode_side(uint8_t side)
class MacintoshDecoder : public Decoder
{
public:
MacintoshDecoder(const DecoderProto& config):
Decoder(config)
{}
MacintoshDecoder(const DecoderProto& config): Decoder(config) {}
nanoseconds_t advanceToNextRecord() override
{
return seekToPattern(ANY_RECORD_PATTERN);
}
{
return seekToPattern(ANY_RECORD_PATTERN);
}
void decodeSectorRecord() override
{
if (readRaw24() != MAC_SECTOR_RECORD)
return;
{
if (readRaw24() != MAC_SECTOR_RECORD)
return;
/* Read header. */
/* Read header. */
auto header = toBytes(readRawBits(7*8)).slice(0, 7);
uint8_t encodedTrack = decode_data_gcr(header[0]);
if (encodedTrack != (_sector->physicalTrack & 0x3f))
return;
uint8_t encodedSector = decode_data_gcr(header[1]);
uint8_t encodedSide = decode_data_gcr(header[2]);
uint8_t formatByte = decode_data_gcr(header[3]);
uint8_t wantedsum = decode_data_gcr(header[4]);
auto header = toBytes(readRawBits(7 * 8)).slice(0, 7);
if (encodedSector > 11)
return;
uint8_t encodedTrack = decode_data_gcr(header[0]);
if (encodedTrack != (_sector->physicalTrack & 0x3f))
return;
_sector->logicalTrack = _sector->physicalTrack;
_sector->logicalSide = decode_side(encodedSide);
_sector->logicalSector = encodedSector;
uint8_t gotsum = (encodedTrack ^ encodedSector ^ encodedSide ^ formatByte) & 0x3f;
if (wantedsum == gotsum)
_sector->status = Sector::DATA_MISSING; /* unintuitive but correct */
}
uint8_t encodedSector = decode_data_gcr(header[1]);
uint8_t encodedSide = decode_data_gcr(header[2]);
uint8_t formatByte = decode_data_gcr(header[3]);
uint8_t wantedsum = decode_data_gcr(header[4]);
if (encodedSector > 11)
return;
_sector->logicalTrack = _sector->physicalTrack;
_sector->logicalSide = decode_side(encodedSide);
_sector->logicalSector = encodedSector;
uint8_t gotsum =
(encodedTrack ^ encodedSector ^ encodedSide ^ formatByte) & 0x3f;
if (wantedsum == gotsum)
_sector->status =
Sector::DATA_MISSING; /* unintuitive but correct */
}
void decodeDataRecord() override
{
if (readRaw24() != MAC_DATA_RECORD)
return;
{
if (readRaw24() != MAC_DATA_RECORD)
return;
/* Read data. */
/* Read data. */
readRawBits(8); /* skip spare byte */
auto inputbuffer = toBytes(readRawBits(MAC_ENCODED_SECTOR_LENGTH*8))
.slice(0, MAC_ENCODED_SECTOR_LENGTH);
readRawBits(8); /* skip spare byte */
auto inputbuffer = toBytes(readRawBits(MAC_ENCODED_SECTOR_LENGTH * 8))
.slice(0, MAC_ENCODED_SECTOR_LENGTH);
for (unsigned i=0; i<inputbuffer.size(); i++)
inputbuffer[i] = decode_data_gcr(inputbuffer[i]);
_sector->status = Sector::BAD_CHECKSUM;
Bytes userData = decode_crazy_data(inputbuffer, _sector->status);
_sector->data.clear();
_sector->data.writer().append(userData.slice(12, 512)).append(userData.slice(0, 12));
}
for (unsigned i = 0; i < inputbuffer.size(); i++)
inputbuffer[i] = decode_data_gcr(inputbuffer[i]);
_sector->status = Sector::BAD_CHECKSUM;
Bytes userData = decode_crazy_data(inputbuffer, _sector->status);
_sector->data.clear();
_sector->data.writer()
.append(userData.slice(12, 512))
.append(userData.slice(0, 12));
}
};
std::unique_ptr<Decoder> createMacintoshDecoder(const DecoderProto& config)
{
return std::unique_ptr<Decoder>(new MacintoshDecoder(config));
return std::unique_ptr<Decoder>(new MacintoshDecoder(config));
}

View File

@@ -1,13 +1,13 @@
#include "globals.h"
#include "decoders/decoders.h"
#include "encoders/encoders.h"
#include "macintosh.h"
#include "crc.h"
#include "readerwriter.h"
#include "image.h"
#include "lib/core/globals.h"
#include "lib/core/utils.h"
#include "lib/decoders/decoders.h"
#include "lib/encoders/encoders.h"
#include "arch/macintosh/macintosh.h"
#include "lib/core/crc.h"
#include "lib/data/image.h"
#include "fmt/format.h"
#include "lib/encoders/encoders.pb.h"
#include "lib/layout.h"
#include "lib/data/layout.h"
#include "arch/macintosh/macintosh.pb.h"
#include <ctype.h>
@@ -174,7 +174,7 @@ static void write_sector(std::vector<bool>& bits,
const std::shared_ptr<const Sector>& sector)
{
if ((sector->data.size() != 512) && (sector->data.size() != 524))
Error() << "unsupported sector size --- you must pick 512 or 524";
error("unsupported sector size --- you must pick 512 or 524");
write_bits(bits, cursor, 0xff, 1 * 8); /* pad byte */
for (int i = 0; i < 7; i++)
@@ -239,13 +239,12 @@ public:
write_sector(bits, cursor, sector);
if (cursor >= bits.size())
Error() << fmt::format(
"track data overrun by {} bits", cursor - bits.size());
error("track data overrun by {} bits", cursor - bits.size());
fillBitmapTo(bits, cursor, bits.size(), {true, false});
std::unique_ptr<Fluxmap> fluxmap(new Fluxmap);
fluxmap->appendBits(bits,
calculatePhysicalClockPeriod(clockRateUs * 1e3, 200e6));
fluxmap->appendBits(
bits, calculatePhysicalClockPeriod(clockRateUs * 1e3, 200e6));
return fluxmap;
}
@@ -253,8 +252,7 @@ private:
const MacintoshEncoderProto& _config;
};
std::unique_ptr<Encoder> createMacintoshEncoder(
const EncoderProto& config)
std::unique_ptr<Encoder> createMacintoshEncoder(const EncoderProto& config)
{
return std::unique_ptr<Encoder>(new MacintoshEncoder(config));
}

View File

@@ -1,12 +1,12 @@
#ifndef MACINTOSH_H
#define MACINTOSH_H
#define MAC_SECTOR_RECORD 0xd5aa96 /* 1101 0101 1010 1010 1001 0110 */
#define MAC_DATA_RECORD 0xd5aaad /* 1101 0101 1010 1010 1010 1101 */
#define MAC_SECTOR_RECORD 0xd5aa96 /* 1101 0101 1010 1010 1001 0110 */
#define MAC_DATA_RECORD 0xd5aaad /* 1101 0101 1010 1010 1010 1101 */
#define MAC_SECTOR_LENGTH 524 /* yes, really */
#define MAC_SECTOR_LENGTH 524 /* yes, really */
#define MAC_ENCODED_SECTOR_LENGTH 703
#define MAC_FORMAT_BYTE 0x22
#define MAC_FORMAT_BYTE 0x22
#define MAC_TRACKS_PER_DISK 80
@@ -15,8 +15,9 @@ class Decoder;
class DecoderProto;
class EncoderProto;
extern std::unique_ptr<Decoder> createMacintoshDecoder(const DecoderProto& config);
extern std::unique_ptr<Encoder> createMacintoshEncoder(const EncoderProto& config);
extern std::unique_ptr<Decoder> createMacintoshDecoder(
const DecoderProto& config);
extern std::unique_ptr<Encoder> createMacintoshEncoder(
const EncoderProto& config);
#endif

View File

@@ -1,6 +1,6 @@
syntax = "proto2";
import "lib/common.proto";
import "lib/config/common.proto";
message MacintoshDecoderProto {}

View File

@@ -1,10 +1,11 @@
#include "globals.h"
#include "fluxmap.h"
#include "decoders/fluxmapreader.h"
#include "decoders/decoders.h"
#include "sector.h"
#include "micropolis.h"
#include "bytes.h"
#include "lib/core/globals.h"
#include "lib/data/fluxmap.h"
#include "lib/data/fluxmapreader.h"
#include "lib/data/fluxpattern.h"
#include "lib/decoders/decoders.h"
#include "lib/data/sector.h"
#include "arch/micropolis/micropolis.h"
#include "lib/core/bytes.h"
#include "fmt/format.h"
#include "lib/decoders/decoders.pb.h"
@@ -20,17 +21,20 @@ static const FluxPattern SECTOR_SYNC_PATTERN(64, 0xAAAAAAAAAAAA5555LL);
static const FluxPattern SECTOR_ADVANCE_PATTERN(64, 0xAAAAAAAAAAAAAAAALL);
/* Standard Micropolis checksum. Adds all bytes, with carry. */
uint8_t micropolisChecksum(const Bytes& bytes) {
ByteReader br(bytes);
uint16_t sum = 0;
while (!br.eof()) {
if (sum > 0xFF) {
sum -= 0x100 - 1;
}
sum += br.read_8();
}
/* The last carry is ignored */
return sum & 0xFF;
uint8_t micropolisChecksum(const Bytes& bytes)
{
ByteReader br(bytes);
uint16_t sum = 0;
while (!br.eof())
{
if (sum > 0xFF)
{
sum -= 0x100 - 1;
}
sum += br.read_8();
}
/* The last carry is ignored */
return sum & 0xFF;
}
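
The "adds all bytes, with carry" description corresponds to an 8-bit sum with end-around carry: whenever the running sum exceeds 0xFF, subtracting 0x100 - 1 removes the overflowed 0x100 and folds the carry back into bit 0, while the very last carry is simply masked off. A standalone sketch on a plain byte vector (std::vector<uint8_t> stands in for the project's Bytes type):

#include <cstdint>
#include <cstdio>
#include <vector>

static uint8_t micropolisChecksumSketch(const std::vector<uint8_t>& bytes)
{
    uint16_t sum = 0;
    for (uint8_t byte : bytes)
    {
        if (sum > 0xFF)
            sum -= 0x100 - 1; /* fold the carry back in before adding */
        sum += byte;
    }
    return sum & 0xFF; /* the last carry is ignored */
}

int main()
{
    /* 0xF0 + 0x20 = 0x110, which folds to 0x11; adding 0x01 gives 0x12. */
    std::vector<uint8_t> data = {0xF0, 0x20, 0x01};
    std::printf("checksum = 0x%02X\n", micropolisChecksumSketch(data));
}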
/* Vector MZOS does not use the standard Micropolis checksum.
@@ -41,145 +45,251 @@ uint8_t micropolisChecksum(const Bytes& bytes) {
* Unlike the Micropolis checksum, this does not cover the 12-byte
* header (track, sector, 10 OS-specific bytes.)
*/
uint8_t mzosChecksum(const Bytes& bytes) {
ByteReader br(bytes);
uint8_t checksum = 0;
uint8_t databyte;
uint8_t mzosChecksum(const Bytes& bytes)
{
ByteReader br(bytes);
uint8_t checksum = 0;
uint8_t databyte;
while (!br.eof()) {
databyte = br.read_8();
checksum ^= ((databyte << 1) | (databyte >> 7));
}
while (!br.eof())
{
databyte = br.read_8();
checksum ^= ((databyte << 1) | (databyte >> 7));
}
return checksum;
return checksum;
}
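
For comparison, the MZOS variant rotates each data byte left by one bit before XORing it into the accumulator, with no carry handling, and (per the comment above) the 12-byte header is excluded from the range being checked. A hypothetical minimal version:

#include <cstdint>
#include <cstdio>
#include <vector>

static uint8_t mzosChecksumSketch(const std::vector<uint8_t>& payload)
{
    uint8_t checksum = 0;
    for (uint8_t byte : payload)
        checksum ^= (uint8_t)((byte << 1) | (byte >> 7)); /* rotl(byte, 1) */
    return checksum;
}

int main()
{
    /* Each 0x81 rotates to 0x03, so the two contributions cancel to 0x00. */
    std::vector<uint8_t> payload = {0x81, 0x81};
    std::printf("checksum = 0x%02X\n", mzosChecksumSketch(payload));
}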
static uint8_t b(uint32_t field, uint8_t pos)
{
return (field >> pos) & 1;
}
static uint8_t eccNextBit(uint32_t ecc, uint8_t data_bit)
{
// This is 0x81932080 which is 0x0104C981 with reversed bits
return b(ecc, 7) ^ b(ecc, 13) ^ b(ecc, 16) ^ b(ecc, 17) ^ b(ecc, 20) ^
b(ecc, 23) ^ b(ecc, 24) ^ b(ecc, 31) ^ data_bit;
}
uint32_t vectorGraphicEcc(const Bytes& bytes)
{
uint32_t e = 0;
Bytes payloadBytes = bytes.slice(0, bytes.size() - 4);
ByteReader payload(payloadBytes);
while (!payload.eof())
{
uint8_t byte = payload.read_8();
for (int i = 0; i < 8; i++)
{
e = (e << 1) | eccNextBit(e, byte >> 7);
byte <<= 1;
}
}
Bytes trailerBytes = bytes.slice(bytes.size() - 4);
ByteReader trailer(trailerBytes);
uint32_t res = e;
while (!trailer.eof())
{
uint8_t byte = trailer.read_8();
for (int i = 0; i < 8; i++)
{
res = (res << 1) | eccNextBit(e, byte >> 7);
e <<= 1;
byte <<= 1;
}
}
return res;
}
/* Fixes bytes when possible, returning true if changed. */
static bool vectorGraphicEccFix(Bytes& bytes, uint32_t syndrome)
{
uint32_t ecc = syndrome;
int pos = (MICROPOLIS_ENCODED_SECTOR_SIZE - 5) * 8 + 7;
bool aligned = false;
while ((ecc & 0xff000000) == 0)
{
pos += 8;
ecc <<= 8;
}
for (; pos >= 0; pos--)
{
bool bit = ecc & 1;
ecc >>= 1;
if (bit)
ecc ^= 0x808264c0;
if ((ecc & 0xff07ffff) == 0)
aligned = true;
if (aligned && pos % 8 == 0)
break;
}
if (pos < 0)
return false;
bytes[pos / 8] ^= ecc >> 16;
return true;
}
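
vectorGraphicEcc() clocks the payload bits through a 32-bit Fibonacci-style LFSR and then folds the last four bytes of the record against the resulting check stream, so a record whose trailer holds the ECC computed over payload-plus-four-zero-bytes yields a zero syndrome; that is what the decoder tests for before attempting the burst correction in vectorGraphicEccFix(). Below is a round-trip sketch under that reading. It assumes Bytes(4) in the encoder is four zero bytes, and uses std::vector<uint8_t> in place of the project's Bytes type.

#include <cstdint>
#include <cstdio>
#include <vector>

static uint8_t b(uint32_t field, uint8_t pos)
{
    return (field >> pos) & 1;
}

static uint8_t eccNextBit(uint32_t ecc, uint8_t dataBit)
{
    /* Same taps as the decoder above (the reflected polynomial). */
    return b(ecc, 7) ^ b(ecc, 13) ^ b(ecc, 16) ^ b(ecc, 17) ^ b(ecc, 20) ^
           b(ecc, 23) ^ b(ecc, 24) ^ b(ecc, 31) ^ dataBit;
}

static uint32_t eccSketch(const std::vector<uint8_t>& bytes)
{
    uint32_t e = 0;

    /* Payload: everything except the last four bytes feeds the LFSR. */
    for (size_t i = 0; i < bytes.size() - 4; i++)
    {
        uint8_t byte = bytes[i];
        for (int j = 0; j < 8; j++)
        {
            e = (e << 1) | eccNextBit(e, byte >> 7);
            byte <<= 1;
        }
    }

    /* Trailer: the stored check bytes are folded against the check stream. */
    uint32_t res = e;
    for (size_t i = bytes.size() - 4; i < bytes.size(); i++)
    {
        uint8_t byte = bytes[i];
        for (int j = 0; j < 8; j++)
        {
            res = (res << 1) | eccNextBit(e, byte >> 7);
            e <<= 1;
            byte <<= 1;
        }
    }
    return res;
}

int main()
{
    std::vector<uint8_t> record(16, 0x5a); /* dummy payload */
    record.resize(record.size() + 4, 0);   /* placeholder ECC, as in the encoder */

    uint32_t check = eccSketch(record);
    for (int i = 0; i < 4; i++) /* store big-endian, like write_be32() */
        record[record.size() - 4 + i] = check >> (24 - 8 * i);

    std::printf("syndrome = 0x%08X\n", eccSketch(record)); /* 0: record intact */
    record[3] ^= 0x01;                                     /* flip one payload bit */
    std::printf("syndrome = 0x%08X\n", eccSketch(record)); /* non-zero: error detected */
}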
class MicropolisDecoder : public Decoder
{
public:
MicropolisDecoder(const DecoderProto& config):
Decoder(config),
_config(config.micropolis())
{
_checksumType = _config.checksum_type();
}
MicropolisDecoder(const DecoderProto& config):
Decoder(config),
_config(config.micropolis())
{
_checksumType = _config.checksum_type();
}
nanoseconds_t advanceToNextRecord() override
{
nanoseconds_t now = tell().ns();
nanoseconds_t advanceToNextRecord() override
{
nanoseconds_t now = tell().ns();
/* For all but the first sector, seek to the next sector pulse.
* The first sector does not contain the sector pulse in the fluxmap.
*/
if (now != 0) {
seekToIndexMark();
now = tell().ns();
}
/* For all but the first sector, seek to the next sector pulse.
* The first sector does not contain the sector pulse in the fluxmap.
*/
if (now != 0)
{
seekToIndexMark();
now = tell().ns();
}
/* Discard a possible partial sector at the end of the track.
* This partial sector could be mistaken for a conflicted sector, if
* whatever data read happens to match the checksum of 0, which is
* rare, but has been observed on some disks.
*/
if (now > (getFluxmapDuration() - 12.5e6)) {
seekToIndexMark();
return 0;
}
/* Discard a possible partial sector at the end of the track.
* This partial sector could be mistaken for a conflicted sector, if
* whatever data read happens to match the checksum of 0, which is
* rare, but has been observed on some disks. There's 570uS of slack in
* each sector, after accounting for preamble, data, and postamble.
*/
if (now > (getFluxmapDuration() - 12.0e6))
{
seekToIndexMark();
return 0;
}
nanoseconds_t clock = seekToPattern(SECTOR_SYNC_PATTERN);
nanoseconds_t clock = seekToPattern(SECTOR_SYNC_PATTERN);
auto syncDelta = tell().ns() - now;
/* Due to the weak nature of the Micropolis SYNC pattern,
* it's possible to detect a false SYNC during the gap
* between the sector pulse and the write gate. If the SYNC
* is detected less than 100uS after the sector pulse, search
* for another valid SYNC.
*
* Reference: Vector Micropolis Disk Controller Board Technical
* Information Manual, pp. 1-16.
*/
if ((syncDelta > 0) && (syncDelta < 100e3)) {
seekToPattern(SECTOR_ADVANCE_PATTERN);
clock = seekToPattern(SECTOR_SYNC_PATTERN);
}
auto syncDelta = tell().ns() - now;
/* Due to the weak nature of the Micropolis SYNC pattern,
* it's possible to detect a false SYNC during the gap
* between the sector pulse and the write gate. If the SYNC
* is detected less than 100uS after the sector pulse, search
* for another valid SYNC.
*
* Reference: Vector Micropolis Disk Controller Board Technical
* Information Manual, pp. 1-16.
*/
if ((syncDelta > 0) && (syncDelta < 100e3))
{
seekToPattern(SECTOR_ADVANCE_PATTERN);
clock = seekToPattern(SECTOR_SYNC_PATTERN);
}
_sector->headerStartTime = tell().ns();
_sector->headerStartTime = tell().ns();
/* seekToPattern() can skip past the index hole, if this happens
* too close to the end of the Fluxmap, discard the sector.
*/
if (_sector->headerStartTime > (getFluxmapDuration() - 12.5e6)) {
return 0;
}
/* seekToPattern() can skip past the index hole, if this happens
* too close to the end of the Fluxmap, discard the sector. The
* preamble was expected to be 640uS long.
*/
if (_sector->headerStartTime > (getFluxmapDuration() - 11.3e6))
{
return 0;
}
return clock;
}
return clock;
}
void decodeSectorRecord() override
{
readRawBits(48);
auto rawbits = readRawBits(MICROPOLIS_ENCODED_SECTOR_SIZE*16);
auto bytes = decodeFmMfm(rawbits).slice(0, MICROPOLIS_ENCODED_SECTOR_SIZE);
ByteReader br(bytes);
void decodeSectorRecord() override
{
readRawBits(48);
auto rawbits = readRawBits(MICROPOLIS_ENCODED_SECTOR_SIZE * 16);
auto bytes =
decodeFmMfm(rawbits).slice(0, MICROPOLIS_ENCODED_SECTOR_SIZE);
int syncByte = br.read_8(); /* sync */
if (syncByte != 0xFF)
return;
bool eccPresent = bytes[274] == 0xaa;
uint32_t ecc = 0;
if (_config.ecc_type() == MicropolisDecoderProto::VECTOR && eccPresent)
{
ecc = vectorGraphicEcc(bytes.slice(0, 274));
if (ecc != 0)
{
vectorGraphicEccFix(bytes, ecc);
ecc = vectorGraphicEcc(bytes.slice(0, 274));
}
}
_sector->logicalTrack = br.read_8();
_sector->logicalSide = _sector->physicalSide;
_sector->logicalSector = br.read_8();
if (_sector->logicalSector > 15)
return;
if (_sector->logicalTrack > 76)
return;
if (_sector->logicalTrack != _sector->physicalTrack)
return;
ByteReader br(bytes);
br.read(10); /* OS data or padding */
auto data = br.read(MICROPOLIS_PAYLOAD_SIZE);
uint8_t wantChecksum = br.read_8();
int syncByte = br.read_8(); /* sync */
if (syncByte != 0xFF)
return;
/* If not specified, automatically determine the checksum type.
* Once the checksum type is determined, it will be used for the
* entire disk.
*/
if (_checksumType == MicropolisDecoderProto::AUTO) {
/* Calculate both standard Micropolis (MDOS, CP/M, OASIS) and MZOS checksums */
if (wantChecksum == micropolisChecksum(bytes.slice(1, 2+266))) {
_checksumType = MicropolisDecoderProto::MICROPOLIS;
} else if (wantChecksum == mzosChecksum(bytes.slice(MICROPOLIS_HEADER_SIZE, MICROPOLIS_PAYLOAD_SIZE))) {
_checksumType = MicropolisDecoderProto::MZOS;
std::cout << "Note: MZOS checksum detected." << std::endl;
}
}
_sector->logicalTrack = br.read_8();
_sector->logicalSide = _sector->physicalSide;
_sector->logicalSector = br.read_8();
if (_sector->logicalSector > 15)
return;
if (_sector->logicalTrack > 76)
return;
if (_sector->logicalTrack != _sector->physicalTrack)
return;
uint8_t gotChecksum;
br.read(10); /* OS data or padding */
auto data = br.read(MICROPOLIS_PAYLOAD_SIZE);
uint8_t wantChecksum = br.read_8();
if (_checksumType == MicropolisDecoderProto::MZOS) {
gotChecksum = mzosChecksum(bytes.slice(MICROPOLIS_HEADER_SIZE, MICROPOLIS_PAYLOAD_SIZE));
} else {
gotChecksum = micropolisChecksum(bytes.slice(1, 2+266));
}
/* If not specified, automatically determine the checksum type.
* Once the checksum type is determined, it will be used for the
* entire disk.
*/
if (_checksumType == MicropolisDecoderProto::AUTO)
{
/* Calculate both standard Micropolis (MDOS, CP/M, OASIS) and MZOS
* checksums */
if (wantChecksum == micropolisChecksum(bytes.slice(1, 2 + 266)))
{
_checksumType = MicropolisDecoderProto::MICROPOLIS;
}
else if (wantChecksum ==
mzosChecksum(bytes.slice(
MICROPOLIS_HEADER_SIZE, MICROPOLIS_PAYLOAD_SIZE)))
{
_checksumType = MicropolisDecoderProto::MZOS;
std::cout << "Note: MZOS checksum detected." << std::endl;
}
}
br.read(5); /* 4 byte ECC and ECC-present flag */
uint8_t gotChecksum;
if (_config.sector_output_size() == MICROPOLIS_PAYLOAD_SIZE)
_sector->data = data;
else if (_config.sector_output_size() == MICROPOLIS_ENCODED_SECTOR_SIZE)
_sector->data = bytes;
else
Error() << "Sector output size may only be 256 or 275";
_sector->status = (wantChecksum == gotChecksum) ? Sector::OK : Sector::BAD_CHECKSUM;
}
if (_checksumType == MicropolisDecoderProto::MZOS)
{
gotChecksum = mzosChecksum(
bytes.slice(MICROPOLIS_HEADER_SIZE, MICROPOLIS_PAYLOAD_SIZE));
}
else
{
gotChecksum = micropolisChecksum(bytes.slice(1, 2 + 266));
}
br.read(5); /* 4 byte ECC and ECC-present flag */
if (_config.sector_output_size() == MICROPOLIS_PAYLOAD_SIZE)
_sector->data = data;
else if (_config.sector_output_size() == MICROPOLIS_ENCODED_SECTOR_SIZE)
_sector->data = bytes;
else
error("Sector output size may only be 256 or 275");
if (wantChecksum == gotChecksum && (!eccPresent || ecc == 0))
_sector->status = Sector::OK;
else
_sector->status = Sector::BAD_CHECKSUM;
}
private:
const MicropolisDecoderProto& _config;
MicropolisDecoderProto_ChecksumType _checksumType; /* -1 = auto, 1 = Micropolis, 2=MZOS */
const MicropolisDecoderProto& _config;
MicropolisDecoderProto_ChecksumType
_checksumType; /* -1 = auto, 1 = Micropolis, 2=MZOS */
};
std::unique_ptr<Decoder> createMicropolisDecoder(const DecoderProto& config)
{
return std::unique_ptr<Decoder>(new MicropolisDecoder(config));
return std::unique_ptr<Decoder>(new MicropolisDecoder(config));
}

View File

@@ -1,18 +1,19 @@
#include "globals.h"
#include "micropolis.h"
#include "sector.h"
#include "decoders/decoders.h"
#include "encoders/encoders.h"
#include "image.h"
#include "lib/core/globals.h"
#include "arch/micropolis/micropolis.h"
#include "lib/data/sector.h"
#include "lib/decoders/decoders.h"
#include "lib/encoders/encoders.h"
#include "lib/data/image.h"
#include "lib/encoders/encoders.pb.h"
static void write_sector(std::vector<bool>& bits,
unsigned& cursor,
const std::shared_ptr<const Sector>& sector)
const std::shared_ptr<const Sector>& sector,
MicropolisEncoderProto::EccType eccType)
{
if ((sector->data.size() != 256) &&
(sector->data.size() != MICROPOLIS_ENCODED_SECTOR_SIZE))
Error() << "unsupported sector size --- you must pick 256 or 275";
error("unsupported sector size --- you must pick 256 or 275");
int fullSectorSize = 40 + MICROPOLIS_ENCODED_SECTOR_SIZE + 40 + 35;
auto fullSector = std::make_shared<std::vector<uint8_t>>();
@@ -24,8 +25,9 @@ static void write_sector(std::vector<bool>& bits,
if (sector->data.size() == MICROPOLIS_ENCODED_SECTOR_SIZE)
{
if (sector->data[0] != 0xFF)
Error() << "275 byte sector doesn't start with sync byte 0xFF. "
"Corrupted sector";
error(
"275 byte sector doesn't start with sync byte 0xFF. "
"Corrupted sector");
uint8_t wantChecksum = sector->data[1 + 2 + 266];
uint8_t gotChecksum =
micropolisChecksum(sector->data.slice(1, 2 + 266));
@@ -44,8 +46,16 @@ static void write_sector(std::vector<bool>& bits,
writer.write_8(0); /* Padding */
writer += sector->data;
writer.write_8(micropolisChecksum(sectorData.slice(1)));
for (int i = 0; i < 5; i++)
writer.write_8(0); /* 4 byte ECC and ECC not present flag */
uint8_t eccPresent = 0;
uint32_t ecc = 0;
if (eccType == MicropolisEncoderProto::VECTOR)
{
eccPresent = 0xaa;
ecc = vectorGraphicEcc(sectorData + Bytes(4));
}
writer.write_be32(ecc);
writer.write_8(eccPresent);
}
for (uint8_t b : sectorData)
fullSector->push_back(b);
@@ -57,7 +67,7 @@ static void write_sector(std::vector<bool>& bits,
fullSector->push_back(0);
if (fullSector->size() != fullSectorSize)
Error() << "sector mismatched length";
error("sector mismatched length");
bool lastBit = false;
encodeMfm(bits, cursor, fullSector, lastBit);
/* filler */
@@ -85,19 +95,34 @@ public:
(_config.rotational_period_ms() * 1e3) / _config.clock_period_us();
std::vector<bool> bits(bitsPerRevolution);
std::vector<unsigned> indexes;
unsigned prev_cursor = 0;
unsigned cursor = 0;
for (const auto& sectorData : sectors)
write_sector(bits, cursor, sectorData);
{
indexes.push_back(cursor);
prev_cursor = cursor;
write_sector(bits, cursor, sectorData, _config.ecc_type());
}
indexes.push_back(prev_cursor + (cursor - prev_cursor) / 2);
indexes.push_back(cursor);
if (cursor != bits.size())
Error() << "track data mismatched length";
error("track data mismatched length");
std::unique_ptr<Fluxmap> fluxmap(new Fluxmap);
fluxmap->appendBits(bits,
calculatePhysicalClockPeriod(
_config.clock_period_us() * 1e3,
_config.rotational_period_ms() * 1e6));
nanoseconds_t clockPeriod =
calculatePhysicalClockPeriod(_config.clock_period_us() * 1e3,
_config.rotational_period_ms() * 1e6);
auto pos = bits.begin();
for (int i = 1; i < indexes.size(); i++)
{
auto end = bits.begin() + indexes[i];
fluxmap->appendBits(std::vector<bool>(pos, end), clockPeriod);
fluxmap->appendIndex();
pos = end;
}
return fluxmap;
}
@@ -105,8 +130,7 @@ private:
const MicropolisEncoderProto& _config;
};
std::unique_ptr<Encoder> createMicropolisEncoder(
const EncoderProto& config)
std::unique_ptr<Encoder> createMicropolisEncoder(const EncoderProto& config)
{
return std::unique_ptr<Encoder>(new MicropolisEncoder(config));
}

View File

@@ -1,18 +1,22 @@
#ifndef MICROPOLIS_H
#define MICROPOLIS_H
#define MICROPOLIS_PAYLOAD_SIZE (256)
#define MICROPOLIS_HEADER_SIZE (1+2+10)
#define MICROPOLIS_ENCODED_SECTOR_SIZE (MICROPOLIS_HEADER_SIZE + MICROPOLIS_PAYLOAD_SIZE + 6)
#define MICROPOLIS_PAYLOAD_SIZE (256)
#define MICROPOLIS_HEADER_SIZE (1 + 2 + 10)
#define MICROPOLIS_ENCODED_SECTOR_SIZE \
(MICROPOLIS_HEADER_SIZE + MICROPOLIS_PAYLOAD_SIZE + 6)
class Decoder;
class Encoder;
class EncoderProto;
class DecoderProto;
extern std::unique_ptr<Decoder> createMicropolisDecoder(const DecoderProto& config);
extern std::unique_ptr<Encoder> createMicropolisEncoder(const EncoderProto& config);
extern std::unique_ptr<Decoder> createMicropolisDecoder(
const DecoderProto& config);
extern std::unique_ptr<Encoder> createMicropolisEncoder(
const EncoderProto& config);
extern uint8_t micropolisChecksum(const Bytes& bytes);
extern uint32_t vectorGraphicEcc(const Bytes& bytes);
#endif

View File

@@ -1,6 +1,6 @@
syntax = "proto2";
import "lib/common.proto";
import "lib/config/common.proto";
message MicropolisDecoderProto {
enum ChecksumType {
@@ -8,17 +8,30 @@ message MicropolisDecoderProto {
MICROPOLIS = 1;
MZOS = 2;
}
enum EccType {
NONE = 0;
VECTOR = 1;
}
optional int32 sector_output_size = 1 [default = 256,
(help) = "How much of the raw sector should be saved. Must be 256 or 275"];
optional ChecksumType checksum_type = 2 [default = AUTO,
(help) = "Checksum type to use: AUTO, MICROPOLIS, MZOS"];
optional EccType ecc_type = 3 [default = NONE,
(help) = "ECC type to use: NONE, VECTOR"];
}
message MicropolisEncoderProto {
enum EccType {
NONE = 0;
VECTOR = 1;
}
optional double clock_period_us = 1
[ default = 2.0, (help) = "clock rate on the real device" ];
optional double rotational_period_ms = 2
[ default = 166.0, (help) = "rotational period on the real device" ];
[ default = 200.0, (help) = "rotational period on the real device" ];
optional EccType ecc_type = 3 [default = NONE,
(help) = "ECC type to use for IMG data: NONE, VECTOR"];
}

View File

@@ -1,10 +1,11 @@
#include "globals.h"
#include "decoders/decoders.h"
#include "mx/mx.h"
#include "crc.h"
#include "fluxmap.h"
#include "decoders/fluxmapreader.h"
#include "sector.h"
#include "lib/core/globals.h"
#include "lib/decoders/decoders.h"
#include "arch/mx/mx.h"
#include "lib/core/crc.h"
#include "lib/data/fluxmap.h"
#include "lib/data/fluxmapreader.h"
#include "lib/data/fluxpattern.h"
#include "lib/data/sector.h"
#include <string.h>
const int SECTOR_SIZE = 256;
@@ -26,52 +27,51 @@ const FluxPattern ID_PATTERN(32, 0xaaaaffaf);
class MxDecoder : public Decoder
{
public:
MxDecoder(const DecoderProto& config):
Decoder(config)
{}
MxDecoder(const DecoderProto& config): Decoder(config) {}
void beginTrack() override
{
_clock = _sector->clock = seekToPattern(ID_PATTERN);
_currentSector = 0;
}
{
_clock = _sector->clock = seekToPattern(ID_PATTERN);
_currentSector = 0;
}
nanoseconds_t advanceToNextRecord() override
{
if (_currentSector == 11)
{
/* That was the last sector on the disk. */
return 0;
}
else
return _clock;
}
{
if (_currentSector == 11)
{
/* That was the last sector on the disk. */
return 0;
}
else
return _clock;
}
void decodeSectorRecord() override
{
/* Skip the ID pattern and track word, which is only present on the
* first sector. We don't trust the track word because some drivers
* don't write it correctly. */
{
/* Skip the ID pattern and track word, which is only present on the
* first sector. We don't trust the track word because some drivers
* don't write it correctly. */
if (_currentSector == 0)
readRawBits(64);
if (_currentSector == 0)
readRawBits(64);
auto bits = readRawBits((SECTOR_SIZE+2)*16);
auto bytes = decodeFmMfm(bits).slice(0, SECTOR_SIZE+2);
auto bits = readRawBits((SECTOR_SIZE + 2) * 16);
auto bytes = decodeFmMfm(bits).slice(0, SECTOR_SIZE + 2);
uint16_t gotChecksum = 0;
ByteReader br(bytes);
for (int i=0; i<(SECTOR_SIZE/2); i++)
gotChecksum += br.read_be16();
uint16_t wantChecksum = br.read_be16();
uint16_t gotChecksum = 0;
ByteReader br(bytes);
for (int i = 0; i < (SECTOR_SIZE / 2); i++)
gotChecksum += br.read_be16();
uint16_t wantChecksum = br.read_be16();
_sector->logicalTrack = _sector->physicalTrack;
_sector->logicalSide = _sector->physicalSide;
_sector->logicalSector = _currentSector;
_sector->data = bytes.slice(0, SECTOR_SIZE).swab();
_sector->status = (gotChecksum == wantChecksum) ? Sector::OK : Sector::BAD_CHECKSUM;
_currentSector++;
}
_sector->logicalTrack = _sector->physicalTrack;
_sector->logicalSide = _sector->physicalSide;
_sector->logicalSector = _currentSector;
_sector->data = bytes.slice(0, SECTOR_SIZE).swab();
_sector->status =
(gotChecksum == wantChecksum) ? Sector::OK : Sector::BAD_CHECKSUM;
_currentSector++;
}
private:
nanoseconds_t _clock;
@@ -80,7 +80,5 @@ private:
std::unique_ptr<Decoder> createMxDecoder(const DecoderProto& config)
{
return std::unique_ptr<Decoder>(new MxDecoder(config));
return std::unique_ptr<Decoder>(new MxDecoder(config));
}

View File

@@ -1,7 +1,7 @@
#ifndef MX_H
#define MX_H
#include "decoders/decoders.h"
#include "lib/decoders/decoders.h"
extern std::unique_ptr<Decoder> createMxDecoder(const DecoderProto& config);

View File

@@ -11,18 +11,19 @@
* sure that the hardSectorId is correct.
*/
#include "globals.h"
#include "fluxmap.h"
#include "decoders/fluxmapreader.h"
#include "decoders/decoders.h"
#include "sector.h"
#include "northstar.h"
#include "bytes.h"
#include "lib/core/globals.h"
#include "lib/data/fluxmap.h"
#include "lib/data/fluxmapreader.h"
#include "lib/data/fluxpattern.h"
#include "lib/decoders/decoders.h"
#include "lib/data/sector.h"
#include "arch/northstar/northstar.h"
#include "lib/core/bytes.h"
#include "lib/decoders/decoders.pb.h"
#include "fmt/format.h"
#define MFM_ID 0xaaaaaaaaaaaa5545LL
#define FM_ID 0xaaaaaaaaaaaaffefLL
#define FM_ID 0xaaaaaaaaaaaaffefLL
/*
* MFM sectors have 32 bytes of 00's followed by two sync characters,
* specified in the North Star MDS manual as 0xFBFB.
@@ -44,133 +45,143 @@ static const FluxPattern MFM_PATTERN(64, MFM_ID);
*/
static const FluxPattern FM_PATTERN(64, FM_ID);
const FluxMatchers ANY_SECTOR_PATTERN(
{
&MFM_PATTERN,
&FM_PATTERN,
}
);
const FluxMatchers ANY_SECTOR_PATTERN({
&MFM_PATTERN,
&FM_PATTERN,
});
/* Checksum is initially 0.
* For each data byte, XOR with the current checksum.
* Rotate checksum left, carrying bit 7 to bit 0.
*/
uint8_t northstarChecksum(const Bytes& bytes) {
ByteReader br(bytes);
uint8_t checksum = 0;
uint8_t northstarChecksum(const Bytes& bytes)
{
ByteReader br(bytes);
uint8_t checksum = 0;
while (!br.eof()) {
checksum ^= br.read_8();
checksum = ((checksum << 1) | ((checksum >> 7)));
}
while (!br.eof())
{
checksum ^= br.read_8();
checksum = ((checksum << 1) | ((checksum >> 7)));
}
return checksum;
return checksum;
}
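
As the comment above says, this checksum starts at zero, XORs each byte into the accumulator, then rotates the accumulator left with bit 7 carried round into bit 0; the rotation is applied to the accumulator rather than to the data byte as in the MZOS checksum earlier. A minimal sketch:

#include <cstdint>
#include <cstdio>
#include <vector>

static uint8_t northstarChecksumSketch(const std::vector<uint8_t>& bytes)
{
    uint8_t checksum = 0;
    for (uint8_t byte : bytes)
    {
        checksum ^= byte;
        checksum = (checksum << 1) | (checksum >> 7); /* rotl(checksum, 1) */
    }
    return checksum;
}

int main()
{
    /* 0x80 XORs in and rotates to 0x01; XORing 0x01 then clears it to 0x00. */
    std::vector<uint8_t> data = {0x80, 0x01};
    std::printf("checksum = 0x%02X\n", northstarChecksumSketch(data));
}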
class NorthstarDecoder : public Decoder
{
public:
NorthstarDecoder(const DecoderProto& config):
Decoder(config),
_config(config.northstar())
{}
NorthstarDecoder(const DecoderProto& config):
Decoder(config),
_config(config.northstar())
{
}
/* Search for FM or MFM sector record */
nanoseconds_t advanceToNextRecord() override
{
nanoseconds_t now = tell().ns();
/* Search for FM or MFM sector record */
nanoseconds_t advanceToNextRecord() override
{
nanoseconds_t now = tell().ns();
/* For all but the first sector, seek to the next sector pulse.
* The first sector does not contain the sector pulse in the fluxmap.
*/
if (now != 0) {
seekToIndexMark();
now = tell().ns();
}
/* For all but the first sector, seek to the next sector pulse.
* The first sector does not contain the sector pulse in the fluxmap.
*/
if (now != 0)
{
seekToIndexMark();
now = tell().ns();
}
/* Discard a possible partial sector at the end of the track.
* This partial sector could be mistaken for a conflicted sector, if
* whatever data read happens to match the checksum of 0, which is
* rare, but has been observed on some disks.
*/
if (now > (getFluxmapDuration() - 21e6)) {
seekToIndexMark();
return 0;
}
/* Discard a possible partial sector at the end of the track.
* This partial sector could be mistaken for a conflicted sector, if
* whatever data read happens to match the checksum of 0, which is
* rare, but has been observed on some disks.
*/
if (now > (getFluxmapDuration() - 21e6))
{
seekToIndexMark();
return 0;
}
int msSinceIndex = std::round(now / 1e6);
int msSinceIndex = std::round(now / 1e6);
/* Note that the seekToPattern ignores the sector pulses, so if
* a sector is not found for some reason, the seek will advance
* past one or more sector pulses. For this reason, calculate
* _hardSectorId after the sector header is found.
*/
nanoseconds_t clock = seekToPattern(ANY_SECTOR_PATTERN);
_sector->headerStartTime = tell().ns();
/* Note that the seekToPattern ignores the sector pulses, so if
* a sector is not found for some reason, the seek will advance
* past one or more sector pulses. For this reason, calculate
* _hardSectorId after the sector header is found.
*/
nanoseconds_t clock = seekToPattern(ANY_SECTOR_PATTERN);
_sector->headerStartTime = tell().ns();
/* Discard a possible partial sector. */
if (_sector->headerStartTime > (getFluxmapDuration() - 21e6)) {
return 0;
}
/* Discard a possible partial sector. */
if (_sector->headerStartTime > (getFluxmapDuration() - 21e6))
{
return 0;
}
int sectorFoundTimeRaw = std::round(_sector->headerStartTime / 1e6);
int sectorFoundTime;
int sectorFoundTimeRaw = std::round(_sector->headerStartTime / 1e6);
int sectorFoundTime;
/* Round time to the nearest 20ms */
if ((sectorFoundTimeRaw % 20) < 10) {
sectorFoundTime = (sectorFoundTimeRaw / 20) * 20;
}
else {
sectorFoundTime = ((sectorFoundTimeRaw + 20) / 20) * 20;
}
/* Round time to the nearest 20ms */
if ((sectorFoundTimeRaw % 20) < 10)
{
sectorFoundTime = (sectorFoundTimeRaw / 20) * 20;
}
else
{
sectorFoundTime = ((sectorFoundTimeRaw + 20) / 20) * 20;
}
/* Calculate the sector ID based on time since the index */
_hardSectorId = (sectorFoundTime / 20) % 10;
/* Calculate the sector ID based on time since the index */
_hardSectorId = (sectorFoundTime / 20) % 10;
return clock;
}
return clock;
}
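
The hard-sector ID above comes purely from timing: the divide-by-20 and modulo-10 imply ten 20 ms hard sectors, i.e. a 200 ms (300 rpm) revolution, so the header start time is rounded to the nearest 20 ms and divided down, wrapping at the index. A worked sketch of the same arithmetic (hardSectorIdFromMs is an illustrative name, not the project's):

#include <cstdio>

static int hardSectorIdFromMs(int msSinceIndex)
{
    int rounded;
    if ((msSinceIndex % 20) < 10)
        rounded = (msSinceIndex / 20) * 20; /* round down */
    else
        rounded = ((msSinceIndex + 20) / 20) * 20; /* round up */
    return (rounded / 20) % 10;
}

int main()
{
    /* 87 ms rounds down to 80 ms (sector 4); 93 ms rounds up to 100 ms
     * (sector 5); 201 ms wraps to sector 0 of the next revolution. */
    for (int ms : {87, 93, 201})
        std::printf("%3d ms -> sector %d\n", ms, hardSectorIdFromMs(ms));
}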
void decodeSectorRecord() override
{
uint64_t id = toBytes(readRawBits(64)).reader().read_be64();
unsigned recordSize, payloadSize, headerSize;
void decodeSectorRecord() override
{
uint64_t id = toBytes(readRawBits(64)).reader().read_be64();
unsigned recordSize, payloadSize, headerSize;
if (id == MFM_ID) {
recordSize = NORTHSTAR_ENCODED_SECTOR_SIZE_DD;
payloadSize = NORTHSTAR_PAYLOAD_SIZE_DD;
headerSize = NORTHSTAR_HEADER_SIZE_DD;
}
else {
recordSize = NORTHSTAR_ENCODED_SECTOR_SIZE_SD;
payloadSize = NORTHSTAR_PAYLOAD_SIZE_SD;
headerSize = NORTHSTAR_HEADER_SIZE_SD;
}
if (id == MFM_ID)
{
recordSize = NORTHSTAR_ENCODED_SECTOR_SIZE_DD;
payloadSize = NORTHSTAR_PAYLOAD_SIZE_DD;
headerSize = NORTHSTAR_HEADER_SIZE_DD;
}
else
{
recordSize = NORTHSTAR_ENCODED_SECTOR_SIZE_SD;
payloadSize = NORTHSTAR_PAYLOAD_SIZE_SD;
headerSize = NORTHSTAR_HEADER_SIZE_SD;
}
auto rawbits = readRawBits(recordSize * 16);
auto bytes = decodeFmMfm(rawbits).slice(0, recordSize);
ByteReader br(bytes);
auto rawbits = readRawBits(recordSize * 16);
auto bytes = decodeFmMfm(rawbits).slice(0, recordSize);
ByteReader br(bytes);
_sector->logicalSide = _sector->physicalSide;
_sector->logicalSector = _hardSectorId;
_sector->logicalTrack = _sector->physicalTrack;
_sector->logicalSide = _sector->physicalSide;
_sector->logicalSector = _hardSectorId;
_sector->logicalTrack = _sector->physicalTrack;
if (headerSize == NORTHSTAR_HEADER_SIZE_DD) {
br.read_8(); /* MFM second Sync char, usually 0xFB */
}
if (headerSize == NORTHSTAR_HEADER_SIZE_DD)
{
br.read_8(); /* MFM second Sync char, usually 0xFB */
}
_sector->data = br.read(payloadSize);
uint8_t wantChecksum = br.read_8();
uint8_t gotChecksum = northstarChecksum(bytes.slice(headerSize - 1, payloadSize));
_sector->status = (wantChecksum == gotChecksum) ? Sector::OK : Sector::BAD_CHECKSUM;
}
_sector->data = br.read(payloadSize);
uint8_t wantChecksum = br.read_8();
uint8_t gotChecksum =
northstarChecksum(bytes.slice(headerSize - 1, payloadSize));
_sector->status =
(wantChecksum == gotChecksum) ? Sector::OK : Sector::BAD_CHECKSUM;
}
private:
const NorthstarDecoderProto& _config;
uint8_t _hardSectorId;
const NorthstarDecoderProto& _config;
uint8_t _hardSectorId;
};
std::unique_ptr<Decoder> createNorthstarDecoder(const DecoderProto& config)
{
return std::unique_ptr<Decoder>(new NorthstarDecoder(config));
return std::unique_ptr<Decoder>(new NorthstarDecoder(config));
}
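To make the hard-sector timing arithmetic above concrete, here is a minimal, self-contained sketch (not part of the FluxEngine API) that maps a header start time to a hard sector ID, assuming ten sector pulses per 200 ms revolution, i.e. one every 20 ms:

#include <cmath>
#include <cstdio>

/* Hypothetical standalone helper mirroring the decoder logic above. */
static int hardSectorIdFromTime(double nsSinceIndex)
{
    int ms = (int)std::round(nsSinceIndex / 1e6);
    /* Round to the nearest 20ms, then map that slot to a sector number. */
    int rounded = ((ms % 20) < 10) ? (ms / 20) * 20 : ((ms + 20) / 20) * 20;
    return (rounded / 20) % 10;
}

int main()
{
    /* 61ms after the index rounds down to 60ms -> sector 3;
     * 219ms rounds up to 220ms, which wraps back to sector 1. */
    std::printf("%d %d\n", hardSectorIdFromTime(61e6), hardSectorIdFromTime(219e6));
    return 0;
}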

View File

@@ -1,10 +1,10 @@
#include "globals.h"
#include "northstar.h"
#include "sector.h"
#include "bytes.h"
#include "decoders/decoders.h"
#include "encoders/encoders.h"
#include "image.h"
#include "lib/core/globals.h"
#include "arch/northstar/northstar.h"
#include "lib/data/sector.h"
#include "lib/core/bytes.h"
#include "lib/decoders/decoders.h"
#include "lib/encoders/encoders.h"
#include "lib/data/image.h"
#include "lib/encoders/encoders.pb.h"
#define GAP_FILL_SIZE_SD 30
@@ -49,7 +49,7 @@ static void write_sector(std::vector<bool>& bits,
doubleDensity = true;
break;
default:
Error() << "unsupported sector size --- you must pick 256 or 512";
error("unsupported sector size --- you must pick 256 or 512");
break;
}
@@ -96,9 +96,10 @@ static void write_sector(std::vector<bool>& bits,
fullSector->push_back(GAP2_FILL_BYTE);
if (fullSector->size() != fullSectorSize)
Error() << "sector mismatched length (" << sector->data.size()
<< ") expected: " << fullSector->size() << " got "
<< fullSectorSize;
error("sector mismatched length ({}); expected {}, got {}",
sector->data.size(),
fullSector->size(),
fullSectorSize);
}
else
{
@@ -148,7 +149,7 @@ public:
write_sector(bits, cursor, sectorData);
if (cursor > bits.size())
Error() << "track data overrun";
error("track data overrun");
std::unique_ptr<Fluxmap> fluxmap(new Fluxmap);
fluxmap->appendBits(bits,
@@ -161,8 +162,7 @@ private:
const NorthstarEncoderProto& _config;
};
std::unique_ptr<Encoder> createNorthstarEncoder(
const EncoderProto& config)
std::unique_ptr<Encoder> createNorthstarEncoder(const EncoderProto& config)
{
return std::unique_ptr<Encoder>(new NorthstarEncoder(config));
}

View File

@@ -1,7 +1,8 @@
#ifndef NORTHSTAR_H
#define NORTHSTAR_H
/* Northstar floppies are 10-hard sectored disks with a sector format as follows:
/* Northstar floppies are 10-hard sectored disks with a sector format as
* follows:
*
* |----------------------------------|
* | SYNC Byte | Payload | Checksum |
@@ -12,15 +13,19 @@
*
*/
#define NORTHSTAR_PREAMBLE_SIZE_SD (16)
#define NORTHSTAR_PREAMBLE_SIZE_DD (32)
#define NORTHSTAR_HEADER_SIZE_SD (1)
#define NORTHSTAR_HEADER_SIZE_DD (2)
#define NORTHSTAR_PAYLOAD_SIZE_SD (256)
#define NORTHSTAR_PAYLOAD_SIZE_DD (512)
#define NORTHSTAR_CHECKSUM_SIZE (1)
#define NORTHSTAR_ENCODED_SECTOR_SIZE_SD \
(NORTHSTAR_HEADER_SIZE_SD + NORTHSTAR_PAYLOAD_SIZE_SD + \
NORTHSTAR_CHECKSUM_SIZE)
#define NORTHSTAR_ENCODED_SECTOR_SIZE_DD \
(NORTHSTAR_HEADER_SIZE_DD + NORTHSTAR_PAYLOAD_SIZE_DD + \
NORTHSTAR_CHECKSUM_SIZE)
class Decoder;
class Encoder;
@@ -29,7 +34,9 @@ class DecoderProto;
extern uint8_t northstarChecksum(const Bytes& bytes);
extern std::unique_ptr<Decoder> createNorthstarDecoder(const DecoderProto& config);
extern std::unique_ptr<Encoder> createNorthstarEncoder(const EncoderProto& config);
extern std::unique_ptr<Decoder> createNorthstarDecoder(
const DecoderProto& config);
extern std::unique_ptr<Encoder> createNorthstarEncoder(
const EncoderProto& config);
#endif /* NORTHSTAR */
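As a quick cross-check of the macros above (a sketch, not part of the header itself): an SD sector encodes to 1 + 256 + 1 = 258 bytes and a DD sector to 2 + 512 + 1 = 515 bytes, and since the decoder reads recordSize * 16 raw bits per record, that is 4128 or 8240 raw FM/MFM bits respectively.

#include <cstdio>

int main()
{
    const int sd = 1 + 256 + 1; /* header + payload + checksum (SD) */
    const int dd = 2 + 512 + 1; /* header + payload + checksum (DD) */
    std::printf("SD: %d bytes (%d raw bits), DD: %d bytes (%d raw bits)\n",
        sd, sd * 16, dd, dd * 16);
    return 0;
}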

View File

@@ -1,6 +1,6 @@
syntax = "proto2";
import "lib/common.proto";
import "lib/config/common.proto";
message NorthstarDecoderProto {}

View File

@@ -1,11 +1,12 @@
#include "lib/globals.h"
#include "lib/core/globals.h"
#include "lib/decoders/decoders.h"
#include "lib/crc.h"
#include "lib/fluxmap.h"
#include "lib/decoders/fluxmapreader.h"
#include "lib/sector.h"
#include "lib/bytes.h"
#include "rolandd20.h"
#include "lib/core/crc.h"
#include "lib/data/fluxmap.h"
#include "lib/data/fluxmapreader.h"
#include "lib/data/fluxpattern.h"
#include "lib/data/sector.h"
#include "lib/core/bytes.h"
#include "arch/rolandd20/rolandd20.h"
#include <string.h>
/* Sector header record:
@@ -29,28 +30,23 @@ static const FluxPattern SECTOR_PATTERN(64, 0xed55555555555555LL);
class RolandD20Decoder : public Decoder
{
public:
RolandD20Decoder(const DecoderProto& config):
Decoder(config)
{}
RolandD20Decoder(const DecoderProto& config): Decoder(config) {}
nanoseconds_t advanceToNextRecord() override
{
return seekToPattern(SECTOR_PATTERN);
}
{
return seekToPattern(SECTOR_PATTERN);
}
void decodeSectorRecord() override
{
auto rawbits = readRawBits(256);
const auto& bytes = decodeFmMfm(rawbits);
fmt::print("{} ", _sector->clock);
hexdump(std::cout, bytes);
}
{
auto rawbits = readRawBits(256);
const auto& bytes = decodeFmMfm(rawbits);
fmt::print("{} ", _sector->clock);
hexdump(std::cout, bytes);
}
};
std::unique_ptr<Decoder> createRolandD20Decoder(const DecoderProto& config)
{
return std::unique_ptr<Decoder>(new RolandD20Decoder(config));
}

View File

@@ -1,4 +1,4 @@
#pragma once
extern std::unique_ptr<Decoder> createRolandD20Decoder(const DecoderProto& config);
extern std::unique_ptr<Decoder> createRolandD20Decoder(
const DecoderProto& config);

View File

@@ -1,12 +1,13 @@
#include "globals.h"
#include "fluxmap.h"
#include "decoders/fluxmapreader.h"
#include "lib/core/globals.h"
#include "lib/data/fluxmap.h"
#include "lib/data/fluxmapreader.h"
#include "lib/data/fluxpattern.h"
#include "protocol.h"
#include "decoders/decoders.h"
#include "sector.h"
#include "smaky6.h"
#include "bytes.h"
#include "crc.h"
#include "lib/decoders/decoders.h"
#include "lib/data/sector.h"
#include "arch/smaky6/smaky6.h"
#include "lib/core/bytes.h"
#include "lib/core/crc.h"
#include "fmt/format.h"
#include "lib/decoders/decoders.pb.h"
#include <string.h>

View File

@@ -7,4 +7,3 @@
extern std::unique_ptr<Decoder> createSmaky6Decoder(const DecoderProto& config);
#endif

View File

@@ -1,6 +1,4 @@
syntax = "proto2";
import "lib/common.proto";
message Smaky6DecoderProto {}

81
arch/tartu/decoder.cc Normal file
View File

@@ -0,0 +1,81 @@
#include "lib/core/globals.h"
#include "lib/config/config.h"
#include "lib/decoders/decoders.h"
#include "arch/tartu/tartu.h"
#include "lib/core/crc.h"
#include "lib/data/fluxmap.h"
#include "lib/data/fluxmapreader.h"
#include "lib/data/fluxpattern.h"
#include "lib/data/sector.h"
#include <string.h>
constexpr uint64_t HEADER_BITS = 0xaaaaaaaa44895554LL;
constexpr uint64_t DATA_BITS = 0xaaaaaaaa44895545LL;
static const FluxPattern HEADER_PATTERN(64, HEADER_BITS);
static const FluxPattern DATA_PATTERN(64, DATA_BITS);
const FluxMatchers ANY_RECORD_PATTERN{&HEADER_PATTERN, &DATA_PATTERN};
class TartuDecoder : public Decoder
{
public:
TartuDecoder(const DecoderProto& config):
Decoder(config),
_config(config.tartu())
{
}
void beginTrack() override {}
nanoseconds_t advanceToNextRecord() override
{
return seekToPattern(ANY_RECORD_PATTERN);
}
void decodeSectorRecord() override
{
if (readRaw64() != HEADER_BITS)
return;
auto bits = readRawBits(16 * 4);
auto bytes = decodeFmMfm(bits).slice(0, 4);
ByteReader br(bytes);
uint8_t track = br.read_8();
_sector->logicalTrack = track >> 1;
_sector->logicalSide = track & 1;
br.skip(1); /* seems always to be 1 */
_sector->logicalSector = br.read_8();
uint8_t wantChecksum = br.read_8();
uint8_t gotChecksum = ~sumBytes(bytes.slice(0, 3));
if (wantChecksum == gotChecksum)
_sector->status = Sector::DATA_MISSING;
}
void decodeDataRecord() override
{
if (readRaw64() != DATA_BITS)
return;
const auto& bits = readRawBits(129 * 16);
const auto& bytes = decodeFmMfm(bits).slice(0, 129);
_sector->data = bytes.slice(0, 128);
uint8_t wantChecksum = bytes.reader().seek(128).read_8();
uint8_t gotChecksum = ~sumBytes(_sector->data);
_sector->status =
(wantChecksum == gotChecksum) ? Sector::OK : Sector::BAD_CHECKSUM;
}
private:
const TartuDecoderProto& _config;
};
std::unique_ptr<Decoder> createTartuDecoder(const DecoderProto& config)
{
return std::unique_ptr<Decoder>(new TartuDecoder(config));
}
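For reference, a minimal sketch (not the FluxEngine API) of the four-byte Tartu sector header handled above: track and side packed into one byte, a constant 1, the sector number, and an inverted byte-sum checksum. The field names and helper below are illustrative only.

#include <cstdint>
#include <cstdio>

struct TartuHeader
{
    uint8_t trackAndSide; /* (track << 1) | side */
    uint8_t one;          /* seems always to be 1 */
    uint8_t sector;
    uint8_t checksum;     /* ~(sum of the first three bytes) */
};

static uint8_t sum3(const TartuHeader& h)
{
    return (uint8_t)(h.trackAndSide + h.one + h.sector);
}

int main()
{
    /* Encode: track 40, side 1, sector 7 (example values). */
    TartuHeader h = {(uint8_t)((40 << 1) | 1), 1, 7, 0};
    h.checksum = (uint8_t)~sum3(h);

    /* Decode, mirroring decodeSectorRecord() above. */
    std::printf("track=%d side=%d sector=%d checksumOk=%d\n",
        h.trackAndSide >> 1,
        h.trackAndSide & 1,
        h.sector,
        h.checksum == (uint8_t)~sum3(h));
    return 0;
}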

115
arch/tartu/encoder.cc Normal file
View File

@@ -0,0 +1,115 @@
#include "lib/core/globals.h"
#include "lib/config/config.h"
#include "lib/decoders/decoders.h"
#include "lib/encoders/encoders.h"
#include "arch/tartu/tartu.h"
#include "lib/core/crc.h"
#include "lib/data/fluxmap.h"
#include "lib/data/sector.h"
#include <string.h>
class TartuEncoder : public Encoder
{
public:
TartuEncoder(const EncoderProto& config):
Encoder(config),
_config(config.tartu())
{
}
std::unique_ptr<Fluxmap> encode(std::shared_ptr<const TrackInfo>& trackInfo,
const std::vector<std::shared_ptr<const Sector>>& sectors,
const Image& image) override
{
_clockRateUs = _config.clock_period_us();
int bitsPerRevolution =
(_config.target_rotational_period_ms() * 1000.0) / _clockRateUs;
const auto& sector = *sectors.begin();
_bits.resize(bitsPerRevolution);
_cursor = 0;
writeFillerRawBitsUs(_config.gap1_us());
bool first = true;
for (const auto& sectorData : sectors)
{
if (!first)
writeFillerRawBitsUs(_config.gap4_us());
first = false;
writeSector(sectorData);
}
if (_cursor > _bits.size())
error("track data overrun");
writeFillerRawBitsUs(_config.target_rotational_period_ms() * 1000.0);
std::unique_ptr<Fluxmap> fluxmap(new Fluxmap);
fluxmap->appendBits(_bits,
calculatePhysicalClockPeriod(_clockRateUs * 1e3,
_config.target_rotational_period_ms() * 1e6));
return fluxmap;
}
private:
void writeBytes(const Bytes& bytes)
{
encodeMfm(_bits, _cursor, bytes, _lastBit);
}
void writeRawBits(uint64_t data, int width)
{
_cursor += width;
_lastBit = data & 1;
for (int i = 0; i < width; i++)
{
unsigned pos = _cursor - i - 1;
if (pos < _bits.size())
_bits[pos] = data & 1;
data >>= 1;
}
}
void writeFillerRawBitsUs(double us)
{
unsigned count = (us / _clockRateUs) / 2;
for (int i = 0; i < count; i++)
writeRawBits(0b10, 2);
};
void writeSector(const std::shared_ptr<const Sector>& sectorData)
{
writeRawBits(_config.header_marker(), 64);
{
Bytes bytes;
ByteWriter bw(bytes);
bw.write_8(
(sectorData->logicalTrack << 1) | sectorData->logicalSide);
bw.write_8(1);
bw.write_8(sectorData->logicalSector);
bw.write_8(~sumBytes(bytes.slice(0, 3)));
writeBytes(bytes);
}
writeFillerRawBitsUs(_config.gap3_us());
writeRawBits(_config.data_marker(), 64);
{
Bytes bytes;
ByteWriter bw(bytes);
bw.append(sectorData->data);
bw.write_8(~sumBytes(bytes.slice(0, sectorData->data.size())));
writeBytes(bytes);
}
}
private:
const TartuEncoderProto& _config;
double _clockRateUs;
std::vector<bool> _bits;
unsigned _cursor;
bool _lastBit;
};
std::unique_ptr<Encoder> createTartuEncoder(const EncoderProto& config)
{
return std::unique_ptr<Encoder>(new TartuEncoder(config));
}

7
arch/tartu/tartu.h Normal file
View File

@@ -0,0 +1,7 @@
#ifndef TARTU_H
#define TARTU_H
extern std::unique_ptr<Decoder> createTartuDecoder(const DecoderProto& config);
extern std::unique_ptr<Encoder> createTartuEncoder(const EncoderProto& config);
#endif

27
arch/tartu/tartu.proto Normal file
View File

@@ -0,0 +1,27 @@
syntax = "proto2";
import "lib/config/common.proto";
message TartuDecoderProto {}
message TartuEncoderProto {
optional double clock_period_us = 1
[ default = 2.0, (help) = "clock rate on the real device (for MFM)" ];
optional double target_rotational_period_ms = 2
[ default=200, (help) = "rotational period of target disk" ];
optional double gap1_us = 3
[ default = 1200,
(help) = "size of gap 1 (the post-index gap)" ];
optional double gap3_us = 4
[ default = 150,
(help) = "size of gap 3 (the pre-data gap)" ];
optional double gap4_us = 5
[ default = 180,
(help) = "size of gap 4 (the post-data or format gap)" ];
optional uint64 header_marker = 6
[ default = 0xaaaaaaaa44895554,
(help) = "64-bit raw bit pattern of header record marker" ];
optional uint64 data_marker = 7
[ default = 0xaaaaaaaa44895545,
(help) = "64-bit raw bit pattern of data record marker" ];
}

View File

@@ -1,13 +1,14 @@
#include "globals.h"
#include "decoders/decoders.h"
#include "encoders/encoders.h"
#include "tids990/tids990.h"
#include "crc.h"
#include "fluxmap.h"
#include "decoders/fluxmapreader.h"
#include "sector.h"
#include "lib/core/globals.h"
#include "lib/decoders/decoders.h"
#include "lib/encoders/encoders.h"
#include "arch/tids990/tids990.h"
#include "lib/core/crc.h"
#include "lib/data/fluxmap.h"
#include "lib/data/fluxmapreader.h"
#include "lib/data/fluxpattern.h"
#include "lib/data/sector.h"
#include <string.h>
#include <fmt/format.h>
#include "fmt/format.h"
/* The Texas Instruments DS990 uses MFM with a scheme similar to a simplified
* version of the IBM record scheme (it's actually easier to parse than IBM).
@@ -38,61 +39,63 @@ const FluxPattern SECTOR_RECORD_PATTERN(32, 0x11112244);
const uint16_t DATA_ID = 0x550b;
const FluxPattern DATA_RECORD_PATTERN(32, 0x11112245);
const FluxMatchers ANY_RECORD_PATTERN({ &SECTOR_RECORD_PATTERN, &DATA_RECORD_PATTERN });
const FluxMatchers ANY_RECORD_PATTERN(
{&SECTOR_RECORD_PATTERN, &DATA_RECORD_PATTERN});
class Tids990Decoder : public Decoder
{
public:
Tids990Decoder(const DecoderProto& config):
Decoder(config)
{}
Tids990Decoder(const DecoderProto& config): Decoder(config) {}
nanoseconds_t advanceToNextRecord() override
{
return seekToPattern(ANY_RECORD_PATTERN);
}
{
return seekToPattern(ANY_RECORD_PATTERN);
}
void decodeSectorRecord() override
{
auto bits = readRawBits(TIDS990_SECTOR_RECORD_SIZE*16);
auto bytes = decodeFmMfm(bits).slice(0, TIDS990_SECTOR_RECORD_SIZE);
{
auto bits = readRawBits(TIDS990_SECTOR_RECORD_SIZE * 16);
auto bytes = decodeFmMfm(bits).slice(0, TIDS990_SECTOR_RECORD_SIZE);
ByteReader br(bytes);
if (br.read_be16() != SECTOR_ID)
return;
ByteReader br(bytes);
if (br.read_be16() != SECTOR_ID)
return;
uint16_t gotChecksum = crc16(CCITT_POLY, bytes.slice(1, TIDS990_SECTOR_RECORD_SIZE-3));
uint16_t gotChecksum =
crc16(CCITT_POLY, bytes.slice(1, TIDS990_SECTOR_RECORD_SIZE - 3));
_sector->logicalSide = br.read_8() >> 3;
_sector->logicalTrack = br.read_8();
br.read_8(); /* number of sectors per track */
_sector->logicalSector = br.read_8();
br.read_be16(); /* sector size */
uint16_t wantChecksum = br.read_be16();
_sector->logicalSide = br.read_8() >> 3;
_sector->logicalTrack = br.read_8();
br.read_8(); /* number of sectors per track */
_sector->logicalSector = br.read_8();
br.read_be16(); /* sector size */
uint16_t wantChecksum = br.read_be16();
if (wantChecksum == gotChecksum)
_sector->status = Sector::DATA_MISSING; /* correct but unintuitive */
}
if (wantChecksum == gotChecksum)
_sector->status =
Sector::DATA_MISSING; /* correct but unintuitive */
}
void decodeDataRecord() override
{
auto bits = readRawBits(TIDS990_DATA_RECORD_SIZE*16);
auto bytes = decodeFmMfm(bits).slice(0, TIDS990_DATA_RECORD_SIZE);
void decodeDataRecord() override
{
auto bits = readRawBits(TIDS990_DATA_RECORD_SIZE * 16);
auto bytes = decodeFmMfm(bits).slice(0, TIDS990_DATA_RECORD_SIZE);
ByteReader br(bytes);
if (br.read_be16() != DATA_ID)
return;
ByteReader br(bytes);
if (br.read_be16() != DATA_ID)
return;
uint16_t gotChecksum = crc16(CCITT_POLY, bytes.slice(1, TIDS990_DATA_RECORD_SIZE-3));
uint16_t gotChecksum =
crc16(CCITT_POLY, bytes.slice(1, TIDS990_DATA_RECORD_SIZE - 3));
_sector->data = br.read(TIDS990_PAYLOAD_SIZE);
uint16_t wantChecksum = br.read_be16();
_sector->status = (wantChecksum == gotChecksum) ? Sector::OK : Sector::BAD_CHECKSUM;
}
_sector->data = br.read(TIDS990_PAYLOAD_SIZE);
uint16_t wantChecksum = br.read_be16();
_sector->status =
(wantChecksum == gotChecksum) ? Sector::OK : Sector::BAD_CHECKSUM;
}
};
std::unique_ptr<Decoder> createTids990Decoder(const DecoderProto& config)
{
return std::unique_ptr<Decoder>(new Tids990Decoder(config));
}
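A minimal sketch of how the ten-byte DS990 sector record above breaks down, following the ByteReader calls in decodeSectorRecord(): a 16-bit ID, a head byte (side in the upper bits), track, sectors-per-track, sector, a 16-bit sector size, and a 16-bit CRC. The byte values below are placeholders, not a real capture.

#include <cstdint>
#include <cstdio>

int main()
{
    const uint8_t rec[10] = {0xaa, 0xbb, 0x08, 0x21, 0x1a, 0x05, 0x01, 0x20, 0xbe, 0xef};

    unsigned id       = (rec[0] << 8) | rec[1]; /* should equal SECTOR_ID */
    int side          = rec[2] >> 3;            /* as in decodeSectorRecord() above */
    int track         = rec[3];
    int sectorsPerTrk = rec[4];
    int sector        = rec[5];
    int sectorSize    = (rec[6] << 8) | rec[7];
    unsigned crc      = (rec[8] << 8) | rec[9]; /* CRC16 over bytes 1..7, per the decoder */

    std::printf("id=%04x side=%d track=%d sector=%d/%d size=%d crc=%04x\n",
        id, side, track, sector, sectorsPerTrk, sectorSize, crc);
    return 0;
}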

View File

@@ -1,13 +1,12 @@
#include "globals.h"
#include "decoders/decoders.h"
#include "encoders/encoders.h"
#include "tids990.h"
#include "crc.h"
#include "readerwriter.h"
#include "image.h"
#include "lib/core/globals.h"
#include "lib/decoders/decoders.h"
#include "lib/encoders/encoders.h"
#include "arch/tids990/tids990.h"
#include "lib/core/crc.h"
#include "lib/data/image.h"
#include "arch/tids990/tids990.pb.h"
#include "lib/encoders/encoders.pb.h"
#include <fmt/format.h>
#include "fmt/format.h"
static int charToInt(char c)
{
@@ -127,14 +126,14 @@ public:
}
if (_cursor >= _bits.size())
Error() << "track data overrun";
error("track data overrun");
while (_cursor < _bits.size())
writeBytes(1, 0x55);
auto fluxmap = std::make_unique<Fluxmap>();
fluxmap->appendBits(_bits,
calculatePhysicalClockPeriod(clockRateUs * 1e3,
_config.rotational_period_ms() * 1e6));
calculatePhysicalClockPeriod(
clockRateUs * 1e3, _config.rotational_period_ms() * 1e6));
return fluxmap;
}
@@ -145,8 +144,7 @@ private:
bool _lastBit;
};
std::unique_ptr<Encoder> createTids990Encoder(
const EncoderProto& config)
std::unique_ptr<Encoder> createTids990Encoder(const EncoderProto& config)
{
return std::unique_ptr<Encoder>(new Tids990Encoder(config));
}

View File

@@ -1,18 +1,18 @@
#ifndef TIDS990_H
#define TIDS990_H
#define TIDS990_PAYLOAD_SIZE 288 /* bytes */
#define TIDS990_SECTOR_RECORD_SIZE 10 /* bytes */
#define TIDS990_DATA_RECORD_SIZE (TIDS990_PAYLOAD_SIZE + 4) /* bytes */
class Encoder;
class Decoder;
class DecoderProto;
class EncoderProto;
extern std::unique_ptr<Decoder> createTids990Decoder(const DecoderProto& config);
extern std::unique_ptr<Encoder> createTids990Encoder(const EncoderProto& config);
extern std::unique_ptr<Decoder> createTids990Decoder(
const DecoderProto& config);
extern std::unique_ptr<Encoder> createTids990Encoder(
const EncoderProto& config);
#endif

View File

@@ -1,6 +1,6 @@
syntax = "proto2";
import "lib/common.proto";
import "lib/config/common.proto";
message Tids990DecoderProto {}

View File

@@ -1,28 +1,31 @@
#include "globals.h"
#include "fluxmap.h"
#include "decoders/fluxmapreader.h"
#include "lib/core/globals.h"
#include "lib/data/fluxmap.h"
#include "lib/data/fluxmapreader.h"
#include "lib/data/fluxpattern.h"
#include "protocol.h"
#include "decoders/decoders.h"
#include "sector.h"
#include "victor9k.h"
#include "crc.h"
#include "bytes.h"
#include "lib/decoders/decoders.h"
#include "lib/data/sector.h"
#include "arch/victor9k/victor9k.h"
#include "lib/core/crc.h"
#include "lib/core/bytes.h"
#include "fmt/format.h"
#include <string.h>
#include <algorithm>
const FluxPattern SECTOR_RECORD_PATTERN(32, VICTOR9K_SECTOR_RECORD);
const FluxPattern DATA_RECORD_PATTERN(32, VICTOR9K_DATA_RECORD);
const FluxMatchers ANY_RECORD_PATTERN({ &SECTOR_RECORD_PATTERN, &DATA_RECORD_PATTERN });
const FluxMatchers ANY_RECORD_PATTERN(
{&SECTOR_RECORD_PATTERN, &DATA_RECORD_PATTERN});
static int decode_data_gcr(uint8_t gcr)
{
switch (gcr)
{
#define GCR_ENTRY(gcr, data) \
case gcr: return data;
#include "data_gcr.h"
#undef GCR_ENTRY
#define GCR_ENTRY(gcr, data) \
case gcr: \
return data;
#include "data_gcr.h"
#undef GCR_ENTRY
}
return -1;
}
@@ -37,11 +40,11 @@ static Bytes decode(const std::vector<bool>& bits)
while (ii != bits.end())
{
uint8_t inputfifo = 0;
for (size_t i=0; i<5; i++)
for (size_t i = 0; i < 5; i++)
{
if (ii == bits.end())
break;
inputfifo = (inputfifo<<1) | *ii++;
inputfifo = (inputfifo << 1) | *ii++;
}
uint8_t decoded = decode_data_gcr(inputfifo);
@@ -55,63 +58,62 @@ static Bytes decode(const std::vector<bool>& bits)
class Victor9kDecoder : public Decoder
{
public:
Victor9kDecoder(const DecoderProto& config):
Decoder(config)
{}
Victor9kDecoder(const DecoderProto& config): Decoder(config) {}
nanoseconds_t advanceToNextRecord() override
{
return seekToPattern(ANY_RECORD_PATTERN);
}
{
return seekToPattern(ANY_RECORD_PATTERN);
}
void decodeSectorRecord() override
{
/* Check the ID. */
{
/* Check the ID. */
if (readRaw32() != VICTOR9K_SECTOR_RECORD)
return;
if (readRaw32() != VICTOR9K_SECTOR_RECORD)
return;
/* Read header. */
/* Read header. */
auto bytes = decode(readRawBits(3*10)).slice(0, 3);
auto bytes = decode(readRawBits(3 * 10)).slice(0, 3);
uint8_t rawTrack = bytes[0];
_sector->logicalSector = bytes[1];
uint8_t gotChecksum = bytes[2];
uint8_t rawTrack = bytes[0];
_sector->logicalSector = bytes[1];
uint8_t gotChecksum = bytes[2];
_sector->logicalTrack = rawTrack & 0x7f;
_sector->logicalSide = rawTrack >> 7;
uint8_t wantChecksum = bytes[0] + bytes[1];
if ((_sector->logicalSector > 20) || (_sector->logicalTrack > 85) || (_sector->logicalSide > 1))
return;
if (wantChecksum == gotChecksum)
_sector->status = Sector::DATA_MISSING; /* unintuitive but correct */
}
_sector->logicalTrack = rawTrack & 0x7f;
_sector->logicalSide = rawTrack >> 7;
uint8_t wantChecksum = bytes[0] + bytes[1];
if ((_sector->logicalSector > 20) || (_sector->logicalTrack > 85) ||
(_sector->logicalSide > 1))
return;
if (wantChecksum == gotChecksum)
_sector->status =
Sector::DATA_MISSING; /* unintuitive but correct */
}
void decodeDataRecord() override
{
/* Check the ID. */
{
/* Check the ID. */
if (readRaw32() != VICTOR9K_DATA_RECORD)
return;
if (readRaw32() != VICTOR9K_DATA_RECORD)
return;
/* Read data. */
/* Read data. */
auto bytes = decode(readRawBits((VICTOR9K_SECTOR_LENGTH+4)*10))
.slice(0, VICTOR9K_SECTOR_LENGTH+4);
ByteReader br(bytes);
auto bytes = decode(readRawBits((VICTOR9K_SECTOR_LENGTH + 4) * 10))
.slice(0, VICTOR9K_SECTOR_LENGTH + 4);
ByteReader br(bytes);
_sector->data = br.read(VICTOR9K_SECTOR_LENGTH);
uint16_t gotChecksum = sumBytes(_sector->data);
uint16_t wantChecksum = br.read_le16();
_sector->status = (gotChecksum == wantChecksum) ? Sector::OK : Sector::BAD_CHECKSUM;
}
_sector->data = br.read(VICTOR9K_SECTOR_LENGTH);
uint16_t gotChecksum = sumBytes(_sector->data);
uint16_t wantChecksum = br.read_le16();
_sector->status =
(gotChecksum == wantChecksum) ? Sector::OK : Sector::BAD_CHECKSUM;
}
};
std::unique_ptr<Decoder> createVictor9kDecoder(const DecoderProto& config)
{
return std::unique_ptr<Decoder>(new Victor9kDecoder(config));
}
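The decode() helper above consumes raw bits five at a time, MSB first, and maps each 5-bit group through the GCR table in data_gcr.h. Below is a sketch of just that framing; decodeOneGroup() is a hypothetical placeholder, not the real Victor 9000 mapping.

#include <cstdint>
#include <cstdio>
#include <vector>

/* Stand-in for decode_data_gcr(); the real 5-to-4 bit table is not
 * reproduced here. */
static int decodeOneGroup(uint8_t gcr)
{
    return gcr & 0x0f; /* placeholder only */
}

int main()
{
    /* Two 5-bit groups: 0b10101 and 0b01010. */
    std::vector<bool> bits = {true, false, true, false, true,
        false, true, false, true, false};
    auto ii = bits.begin();
    while (ii != bits.end())
    {
        uint8_t fifo = 0;
        for (int i = 0; i < 5 && ii != bits.end(); i++)
            fifo = (fifo << 1) | *ii++; /* MSB first, as in decode() above */
        std::printf("group %02x -> %d\n", (unsigned)fifo, decodeOneGroup(fifo));
    }
    return 0;
}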

View File

@@ -1,17 +1,17 @@
#include "globals.h"
#include "decoders/decoders.h"
#include "encoders/encoders.h"
#include "victor9k.h"
#include "crc.h"
#include "sector.h"
#include "readerwriter.h"
#include "image.h"
#include "lib/core/globals.h"
#include "lib/core/utils.h"
#include "lib/decoders/decoders.h"
#include "lib/encoders/encoders.h"
#include "arch/victor9k/victor9k.h"
#include "lib/core/crc.h"
#include "lib/data/sector.h"
#include "lib/data/image.h"
#include "fmt/format.h"
#include "arch/victor9k/victor9k.pb.h"
#include "lib/encoders/encoders.pb.h"
#include "lib/layout.h"
#include "lib/data/layout.h"
#include <ctype.h>
#include "bytes.h"
#include "lib/core/bytes.h"
static bool lastBit;
@@ -169,14 +169,15 @@ public:
const Image& image) override
{
Victor9kEncoderProto::TrackdataProto trackdata;
getTrackFormat(trackdata, trackInfo->logicalTrack, trackInfo->logicalSide);
getTrackFormat(
trackdata, trackInfo->logicalTrack, trackInfo->logicalSide);
unsigned bitsPerRevolution = (trackdata.rotational_period_ms() * 1e3) /
trackdata.clock_period_us();
std::vector<bool> bits(bitsPerRevolution);
nanoseconds_t clockPeriod = calculatePhysicalClockPeriod(
trackdata.clock_period_us() * 1e3,
trackdata.rotational_period_ms() * 1e6);
nanoseconds_t clockPeriod =
calculatePhysicalClockPeriod(trackdata.clock_period_us() * 1e3,
trackdata.rotational_period_ms() * 1e6);
unsigned cursor = 0;
fillBitmapTo(bits,
@@ -189,8 +190,7 @@ public:
write_sector(bits, cursor, trackdata, *sector);
if (cursor >= bits.size())
Error() << fmt::format(
"track data overrun by {} bits", cursor - bits.size());
error("track data overrun by {} bits", cursor - bits.size());
fillBitmapTo(bits, cursor, bits.size(), {true, false});
std::unique_ptr<Fluxmap> fluxmap(new Fluxmap);
@@ -202,8 +202,7 @@ private:
const Victor9kEncoderProto& _config;
};
std::unique_ptr<Encoder> createVictor9kEncoder(
const EncoderProto& config)
std::unique_ptr<Encoder> createVictor9kEncoder(const EncoderProto& config)
{
return std::unique_ptr<Encoder>(new Victor9kEncoder(config));
}

View File

@@ -13,12 +13,14 @@ class DecoderProto;
/* ... 1101 0100 1001
* ^^ ^^^^ ^^^^ ten bit IO byte */
#define VICTOR9K_DATA_RECORD 0xfffffd49
#define VICTOR9K_DATA_ID 0x8
#define VICTOR9K_SECTOR_LENGTH 512
extern std::unique_ptr<Decoder> createVictor9kDecoder(const DecoderProto& config);
extern std::unique_ptr<Encoder> createVictor9kEncoder(const EncoderProto& config);
extern std::unique_ptr<Decoder> createVictor9kDecoder(
const DecoderProto& config);
extern std::unique_ptr<Encoder> createVictor9kEncoder(
const EncoderProto& config);
#endif

View File

@@ -1,6 +1,6 @@
syntax = "proto2";
import "lib/common.proto";
import "lib/config/common.proto";
message Victor9kDecoderProto {}

View File

@@ -1,12 +1,13 @@
#include "globals.h"
#include "fluxmap.h"
#include "decoders/fluxmapreader.h"
#include "lib/core/globals.h"
#include "lib/data/fluxmap.h"
#include "lib/data/fluxmapreader.h"
#include "lib/data/fluxpattern.h"
#include "protocol.h"
#include "decoders/decoders.h"
#include "sector.h"
#include "zilogmcz.h"
#include "bytes.h"
#include "crc.h"
#include "lib/decoders/decoders.h"
#include "lib/data/sector.h"
#include "arch/zilogmcz/zilogmcz.h"
#include "lib/core/bytes.h"
#include "lib/core/crc.h"
#include "fmt/format.h"
#include <string.h>
#include <algorithm>
@@ -16,42 +17,40 @@ static const FluxPattern SECTOR_START_PATTERN(16, 0xaaab);
class ZilogMczDecoder : public Decoder
{
public:
ZilogMczDecoder(const DecoderProto& config):
Decoder(config)
{}
ZilogMczDecoder(const DecoderProto& config): Decoder(config) {}
nanoseconds_t advanceToNextRecord() override
{
seekToIndexMark();
return seekToPattern(SECTOR_START_PATTERN);
}
{
seekToIndexMark();
return seekToPattern(SECTOR_START_PATTERN);
}
void decodeSectorRecord() override
{
readRawBits(14);
{
readRawBits(14);
auto rawbits = readRawBits(140*16);
auto bytes = decodeFmMfm(rawbits).slice(0, 140);
ByteReader br(bytes);
auto rawbits = readRawBits(140 * 16);
auto bytes = decodeFmMfm(rawbits).slice(0, 140);
ByteReader br(bytes);
_sector->logicalSector = br.read_8() & 0x1f;
_sector->logicalSide = 0;
_sector->logicalTrack = br.read_8() & 0x7f;
if (_sector->logicalSector > 31)
return;
if (_sector->logicalTrack > 80)
return;
_sector->logicalSector = br.read_8() & 0x1f;
_sector->logicalSide = 0;
_sector->logicalTrack = br.read_8() & 0x7f;
if (_sector->logicalSector > 31)
return;
if (_sector->logicalTrack > 80)
return;
_sector->data = br.read(132);
uint16_t wantChecksum = br.read_be16();
uint16_t gotChecksum = crc16(MODBUS_POLY, 0x0000, bytes.slice(0, 134));
_sector->data = br.read(132);
uint16_t wantChecksum = br.read_be16();
uint16_t gotChecksum = crc16(MODBUS_POLY, 0x0000, bytes.slice(0, 134));
_sector->status = (wantChecksum == gotChecksum) ? Sector::OK : Sector::BAD_CHECKSUM;
}
_sector->status =
(wantChecksum == gotChecksum) ? Sector::OK : Sector::BAD_CHECKSUM;
}
};
std::unique_ptr<Decoder> createZilogMczDecoder(const DecoderProto& config)
{
return std::unique_ptr<Decoder>(new ZilogMczDecoder(config));
}

View File

@@ -1,8 +1,7 @@
#ifndef ZILOGMCZ_H
#define ZILOGMCZ_H
extern std::unique_ptr<Decoder> createZilogMczDecoder(const DecoderProto& config);
extern std::unique_ptr<Decoder> createZilogMczDecoder(
const DecoderProto& config);
#endif

194
build.lua
View File

@@ -1,194 +0,0 @@
vars.cflags = { "$(CFLAGS)" }
vars.cxxflags = { "$(CXXFLAGS)" }
vars.ldflags = { "-pthread" }
include "build/protobuf.lua"
include "build/dependency.lua"
include "build/tests.lua"
dependency {
name = "fmt_dep",
pkg_config = "fmt",
}
dependency {
name = "stb_dep",
pkg_config = "stb",
fallback = "dep/stb+stb"
}
dependency {
name = "protobuf_dep",
pkg_config = "protobuf"
}
dependency {
name = "zlib_dep",
pkg_config = "zlib"
}
proto_cc_library {
name = "config_lib",
srcs = {
"./lib/common.proto",
"./lib/config.proto",
"./lib/decoders/decoders.proto",
"./lib/drive.proto",
"./lib/encoders/encoders.proto",
"./lib/fl2.proto",
"./lib/fluxsink/fluxsink.proto",
"./lib/fluxsource/fluxsource.proto",
"./lib/imagereader/imagereader.proto",
"./lib/imagewriter/imagewriter.proto",
"./lib/mapper.proto",
"./lib/usb/usb.proto",
"./arch/aeslanier/aeslanier.proto",
"./arch/agat/agat.proto",
"./arch/amiga/amiga.proto",
"./arch/apple2/apple2.proto",
"./arch/brother/brother.proto",
"./arch/c64/c64.proto",
"./arch/f85/f85.proto",
"./arch/fb100/fb100.proto",
"./arch/ibm/ibm.proto",
"./arch/macintosh/macintosh.proto",
"./arch/micropolis/micropolis.proto",
"./arch/mx/mx.proto",
"./arch/northstar/northstar.proto",
"./arch/rolandd20/rolandd20.proto",
"./arch/tids990/tids990.proto",
"./arch/victor9k/victor9k.proto",
"./arch/zilogmcz/zilogmcz.proto",
}
}
clibrary {
name = "protocol_lib",
hdrs = { "./protocol.h" }
}
clibrary {
name = "libfluxengine",
srcs = {
"./arch/aeslanier/decoder.cc",
"./arch/agat/agat.cc",
"./arch/agat/decoder.cc",
"./arch/amiga/amiga.cc",
"./arch/amiga/decoder.cc",
"./arch/amiga/encoder.cc",
"./arch/apple2/decoder.cc",
"./arch/apple2/encoder.cc",
"./arch/brother/decoder.cc",
"./arch/brother/encoder.cc",
"./arch/c64/c64.cc",
"./arch/c64/decoder.cc",
"./arch/c64/encoder.cc",
"./arch/f85/decoder.cc",
"./arch/fb100/decoder.cc",
"./arch/ibm/decoder.cc",
"./arch/ibm/encoder.cc",
"./arch/macintosh/decoder.cc",
"./arch/macintosh/encoder.cc",
"./arch/micropolis/decoder.cc",
"./arch/micropolis/encoder.cc",
"./arch/mx/decoder.cc",
"./arch/northstar/decoder.cc",
"./arch/northstar/encoder.cc",
"./arch/rolandd20/rolandd20.cc",
"./arch/tids990/decoder.cc",
"./arch/tids990/encoder.cc",
"./arch/victor9k/decoder.cc",
"./arch/victor9k/encoder.cc",
"./arch/zilogmcz/decoder.cc",
"./lib/bitmap.cc",
"./lib/bytes.cc",
"./lib/crc.cc",
"./lib/csvreader.cc",
"./lib/decoders/decoders.cc",
"./lib/decoders/fluxdecoder.cc",
"./lib/decoders/fluxmapreader.cc",
"./lib/decoders/fmmfm.cc",
"./lib/encoders/encoders.cc",
"./lib/flags.cc",
"./lib/fluxmap.cc",
"./lib/fluxsink/aufluxsink.cc",
"./lib/fluxsink/fl2fluxsink.cc",
"./lib/fluxsink/fluxsink.cc",
"./lib/fluxsink/hardwarefluxsink.cc",
"./lib/fluxsink/scpfluxsink.cc",
"./lib/fluxsink/vcdfluxsink.cc",
"./lib/fluxsource/cwffluxsource.cc",
"./lib/fluxsource/erasefluxsource.cc",
"./lib/fluxsource/fl2fluxsource.cc",
"./lib/fluxsource/fluxsource.cc",
"./lib/fluxsource/hardwarefluxsource.cc",
"./lib/fluxsource/kryoflux.cc",
"./lib/fluxsource/kryofluxfluxsource.cc",
"./lib/fluxsource/scpfluxsource.cc",
"./lib/fluxsource/testpatternfluxsource.cc",
"./lib/globals.cc",
"./lib/hexdump.cc",
"./lib/image.cc",
"./lib/imagereader/d64imagereader.cc",
"./lib/imagereader/d88imagereader.cc",
"./lib/imagereader/dimimagereader.cc",
"./lib/imagereader/diskcopyimagereader.cc",
"./lib/imagereader/fdiimagereader.cc",
"./lib/imagereader/imagereader.cc",
"./lib/imagereader/imdimagereader.cc",
"./lib/imagereader/imgimagereader.cc",
"./lib/imagereader/jv3imagereader.cc",
"./lib/imagereader/nfdimagereader.cc",
"./lib/imagereader/nsiimagereader.cc",
"./lib/imagereader/td0imagereader.cc",
"./lib/imagewriter/d64imagewriter.cc",
"./lib/imagewriter/d88imagewriter.cc",
"./lib/imagewriter/diskcopyimagewriter.cc",
"./lib/imagewriter/imagewriter.cc",
"./lib/imagewriter/imgimagewriter.cc",
"./lib/imagewriter/ldbsimagewriter.cc",
"./lib/imagewriter/nsiimagewriter.cc",
"./lib/imagewriter/rawimagewriter.cc",
"./lib/imginputoutpututils.cc",
"./lib/ldbs.cc",
"./lib/logger.cc",
"./lib/mapper.cc",
"./lib/proto.cc",
"./lib/readerwriter.cc",
"./lib/sector.cc",
"./lib/usb/fluxengineusb.cc",
"./lib/usb/greaseweazle.cc",
"./lib/usb/greaseweazleusb.cc",
"./lib/usb/serial.cc",
"./lib/usb/usb.cc",
"./lib/usb/usbfinder.cc",
"./lib/utils.cc",
"protocol.h",
},
deps = {
"+config_lib",
"+protocol_lib",
"+fmt_dep",
"+protobuf_dep",
"+zlib_dep",
"dep/libusbp+libusbp",
},
dep_cflags = { "-Ilib", "-Iarch", "-I." },
vars = {
["+cflags"] = { "-Ilib", "-Iarch", "-I." }
}
}
installable {
name = "all",
map = {
["fluxengine"] = "src+fluxengine",
["fluxengine-gui"] = "src/gui+fluxengine",
["brother120tool"] = "tools+brother120tool",
["brother240tool"] = "tools+brother240tool",
["upgrade-flux-file"] = "tools+upgrade-flux-file",
}
}
include "tests/build.lua"

113
build.py Normal file
View File

@@ -0,0 +1,113 @@
from build.ab import export
from build.c import clibrary, cxxlibrary
from build.protobuf import proto, protocc
from build.pkg import package
from build.utils import test
from glob import glob
import config
import re
# Hack for building on Fedora/WSL; executables get the .exe extension,
# but the build system detects it as Linux.
import build.toolchain
build.toolchain.Toolchain.EXE = "$(EXT)"
package(name="protobuf_lib", package="protobuf")
package(name="z_lib", package="zlib")
package(name="fmt_lib", package="fmt", fallback="dep/fmt")
package(name="sqlite3_lib", package="sqlite3")
clibrary(name="protocol", hdrs={"protocol.h": "./protocol.h"})
corpustests = []
if not glob("../fluxengine-testdata/data"):
print("fluxengine-testdata not found; skipping corpus tests")
else:
corpus = [
("acorndfs", "", "--200"),
("agat", "", ""),
("amiga", "", ""),
("apple2", "", "--140 40track_drive"),
("atarist", "", "--360"),
("atarist", "", "--370"),
("atarist", "", "--400"),
("atarist", "", "--410"),
("atarist", "", "--720"),
("atarist", "", "--740"),
("atarist", "", "--800"),
("atarist", "", "--820"),
("bk", "", ""),
("brother", "", "--120 40track_drive"),
("brother", "", "--240"),
(
"commodore",
"scripts/commodore1541_test.textpb",
"--171 40track_drive",
),
(
"commodore",
"scripts/commodore1541_test.textpb",
"--192 40track_drive",
),
("commodore", "", "--800"),
("commodore", "", "--1620"),
("hplif", "", "--264"),
("hplif", "", "--608"),
("hplif", "", "--616"),
("hplif", "", "--770"),
("ibm", "", "--1200"),
("ibm", "", "--1232"),
("ibm", "", "--1440"),
("ibm", "", "--1680"),
("ibm", "", "--180 40track_drive"),
("ibm", "", "--160 40track_drive"),
("ibm", "", "--320 40track_drive"),
("ibm", "", "--360 40track_drive"),
("ibm", "", "--720_96"),
("ibm", "", "--720_135"),
("mac", "scripts/mac400_test.textpb", "--400"),
("mac", "scripts/mac800_test.textpb", "--800"),
("n88basic", "", ""),
("rx50", "", ""),
("tartu", "", "--390 40track_drive"),
("tartu", "", "--780"),
("tids990", "", ""),
("victor9k", "", "--612"),
("victor9k", "", "--1224"),
]
for c in corpus:
name = re.sub(r"[^a-zA-Z0-9]", "_", "".join(c), 0)
corpustests += [
test(
name=f"corpustest_{name}_{format}",
ins=["src+fluxengine"],
deps=["scripts/encodedecodetest.sh"],
commands=[
"$[deps[0]] "
+ c[0]
+ " "
+ format
+ " $[ins[0]] '"
+ c[1]
+ "' '"
+ c[2]
+ "' $(dir $[outs[0]]) > /dev/null"
],
label="CORPUSTEST",
)
for format in ["scp", "flux"]
]
export(
name="all",
items={
"fluxengine$(EXT)": "src+fluxengine",
"fluxengine-gui$(EXT)": "src/gui",
"brother120tool$(EXT)": "tools+brother120tool",
"brother240tool$(EXT)": "tools+brother240tool",
"upgrade-flux-file$(EXT)": "tools+upgrade-flux-file",
}
| ({"FluxEngine.pkg": "src/gui+fluxengine_pkg"} if config.osx else {}),
deps=["tests", "src/formats+docs", "scripts+mkdocindex"] + corpustests,
)

19
build/_objectify.py Normal file
View File

@@ -0,0 +1,19 @@
import sys
from functools import partial
if len(sys.argv) != 3:
sys.exit("Usage: %s <file> <symbol>" % sys.argv[0])
filename = sys.argv[1]
symbol = sys.argv[2]
print("const uint8_t " + symbol + "[] = {")
n = 0
with open(filename, "rb") as in_file:
for c in iter(partial(in_file.read, 1), b""):
print("0x%02X," % ord(c), end="")
n += 1
if n % 16 == 0:
print()
print("};")
print("const size_t " + symbol + "_len = sizeof(" + symbol + ");")

49
build/_sandbox.py Normal file
View File

@@ -0,0 +1,49 @@
#!/usr/bin/python3
from os.path import *
import argparse
import os
import shutil
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--sandbox")
parser.add_argument("-v", "--verbose", action="store_true")
parser.add_argument("-l", "--link", action="store_true")
parser.add_argument("-e", "--export", action="store_true")
parser.add_argument("files", nargs="*")
args = parser.parse_args()
assert args.sandbox, "You must specify a sandbox directory"
assert args.link ^ args.export, "You can't link and export at the same time"
if args.link:
os.makedirs(args.sandbox, exist_ok=True)
for f in args.files:
sf = join(args.sandbox, f)
if args.verbose:
print("link", sf)
os.makedirs(dirname(sf), exist_ok=True)
try:
os.symlink(abspath(f), sf)
except PermissionError:
shutil.copy(f, sf)
if args.export:
for f in args.files:
sf = join(args.sandbox, f)
if args.verbose:
print("export", sf)
df = dirname(f)
if df:
os.makedirs(df, exist_ok=True)
try:
os.remove(f)
except FileNotFoundError:
pass
os.rename(sf, f)
main()

25
build/_zip.py Executable file
View File

@@ -0,0 +1,25 @@
#!/usr/bin/python3
from os.path import *
import argparse
import os
from zipfile import ZipFile
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-z", "--zipfile")
parser.add_argument("-v", "--verbose", action="store_true")
parser.add_argument("-f", "--file", nargs=2, action="append")
args = parser.parse_args()
assert args.zipfile, "You must specify a zipfile to create"
with ZipFile(args.zipfile, mode="w") as zf:
for zipname, filename in args.file:
if args.verbose:
print(filename, "->", zipname)
zf.write(filename, arcname=zipname)
main()

111
build/ab.mk Normal file
View File

@@ -0,0 +1,111 @@
MAKENOT4 := $(if $(findstring 3.9999, $(lastword $(sort 3.9999 $(MAKE_VERSION)))),yes,no)
ifeq ($(MAKENOT4),yes)
$(error You need GNU Make 4.x for this (if you're on OSX, use gmake).)
endif
OBJ ?= .obj
PYTHON ?= python3
PKG_CONFIG ?= pkg-config
HOST_PKG_CONFIG ?= $(PKG_CONFIG)
ECHO ?= echo
CP ?= cp
HOSTCC ?= gcc
HOSTCXX ?= g++
HOSTAR ?= ar
HOSTCFLAGS ?= -g -Og
HOSTLDFLAGS ?= -g
CC ?= $(HOSTCC)
CXX ?= $(HOSTCXX)
AR ?= $(HOSTAR)
CFLAGS ?= $(HOSTCFLAGS)
LDFLAGS ?= $(HOSTLDFLAGS)
export PKG_CONFIG
export HOST_PKG_CONFIG
ifdef VERBOSE
hide =
else
ifdef V
hide =
else
hide = @
endif
endif
# If enabled, shows a nice display of how far through the build you are. This
# doubles Make startup time. Also, on Make 4.3 and above, rebuilds don't show
# correct progress information.
AB_ENABLE_PROGRESS_INFO ?= true
WINDOWS := no
OSX := no
LINUX := no
ifeq ($(OS),Windows_NT)
WINDOWS := yes
else
UNAME_S := $(shell uname -s)
ifeq ($(UNAME_S),Linux)
LINUX := yes
endif
ifeq ($(UNAME_S),Darwin)
OSX := yes
endif
endif
ifeq ($(OS), Windows_NT)
EXT ?= .exe
endif
EXT ?=
CWD=$(shell pwd)
ifeq ($(AB_ENABLE_PROGRESS_INFO),true)
ifeq ($(PROGRESSINFO),)
# The first make invocation here has to have its output discarded or else it
# produces spurious 'Leaving directory' messages... don't know why.
rulecount := $(strip $(shell $(MAKE) --no-print-directory -q $(OBJ)/build.mk PROGRESSINFO=1 > /dev/null \
&& $(MAKE) --no-print-directory -n $(MAKECMDGOALS) PROGRESSINFO=XXXPROGRESSINFOXXX | grep XXXPROGRESSINFOXXX | wc -l))
ruleindex := 1
PROGRESSINFO = "[$(ruleindex)/$(rulecount)]$(eval ruleindex := $(shell expr $(ruleindex) + 1)) "
endif
else
PROGRESSINFO = ""
endif
PKG_CONFIG_HASHES = $(OBJ)/.pkg-config-hashes/target-$(word 1, $(shell $(PKG_CONFIG) --list-all | md5sum))
HOST_PKG_CONFIG_HASHES = $(OBJ)/.pkg-config-hashes/host-$(word 1, $(shell $(HOST_PKG_CONFIG) --list-all | md5sum))
$(OBJ)/build.mk : $(PKG_CONFIG_HASHES) $(HOST_PKG_CONFIG_HASHES)
$(PKG_CONFIG_HASHES) $(HOST_PKG_CONFIG_HASHES) &:
$(hide) rm -rf $(OBJ)/.pkg-config-hashes
$(hide) mkdir -p $(OBJ)/.pkg-config-hashes
$(hide) touch $(PKG_CONFIG_HASHES) $(HOST_PKG_CONFIG_HASHES)
include $(OBJ)/build.mk
MAKEFLAGS += -r -j$(shell nproc)
.DELETE_ON_ERROR:
.PHONY: update-ab
update-ab:
@echo "Press RETURN to update ab from the repository, or CTRL+C to cancel." \
&& read a \
&& (curl -L https://github.com/davidgiven/ab/releases/download/dev/distribution.tar.xz | tar xvJf -) \
&& echo "Done."
.PHONY: clean
clean::
@echo CLEAN
$(hide) rm -rf $(OBJ)
export PYTHONHASHSEED = 1
build-files = $(shell find . -name 'build.py') $(wildcard build/*.py) $(wildcard config.py)
$(OBJ)/build.mk: Makefile $(build-files) build/ab.mk
@echo "AB"
@mkdir -p $(OBJ)
$(hide) $(PYTHON) -X pycache_prefix=$(OBJ)/__pycache__ build/ab.py -o $@ build.py \
|| rm -f $@

646
build/ab.py Normal file
View File

@@ -0,0 +1,646 @@
from os.path import *
from pathlib import Path
from typing import Iterable
import argparse
import builtins
from copy import copy
import functools
import importlib
import importlib.util
from importlib.machinery import (
SourceFileLoader,
PathFinder,
ModuleSpec,
)
import inspect
import string
import sys
import hashlib
import re
import ast
from collections import namedtuple
VERBOSE_MK_FILE = False
verbose = False
quiet = False
cwdStack = [""]
targets = {}
unmaterialisedTargets = {} # dict, not set, to get consistent ordering
materialisingStack = []
defaultGlobals = {}
globalId = 1
wordCache = {}
RE_FORMAT_SPEC = re.compile(
r"(?:(?P<fill>[\s\S])?(?P<align>[<>=^]))?"
r"(?P<sign>[- +])?"
r"(?P<pos_zero>z)?"
r"(?P<alt>#)?"
r"(?P<zero_padding>0)?"
r"(?P<width_str>\d+)?"
r"(?P<grouping>[_,])?"
r"(?:(?P<decimal>\.)(?P<precision_str>\d+))?"
r"(?P<type>[bcdeEfFgGnosxX%])?"
)
CommandFormatSpec = namedtuple(
"CommandFormatSpec", RE_FORMAT_SPEC.groupindex.keys()
)
sys.path += ["."]
old_import = builtins.__import__
class PathFinderImpl(PathFinder):
def find_spec(self, fullname, path, target=None):
# The second test here is needed for Python 3.9.
if not path or not path[0]:
path = ["."]
if len(path) != 1:
return None
try:
path = relpath(path[0])
except ValueError:
return None
realpath = fullname.replace(".", "/")
buildpath = realpath + ".py"
if isfile(buildpath):
spec = importlib.util.spec_from_file_location(
name=fullname,
location=buildpath,
loader=BuildFileLoaderImpl(fullname=fullname, path=buildpath),
submodule_search_locations=[],
)
return spec
if isdir(realpath):
return ModuleSpec(fullname, None, origin=realpath, is_package=True)
return None
class BuildFileLoaderImpl(SourceFileLoader):
def exec_module(self, module):
sourcepath = relpath(module.__file__)
if not quiet:
print("loading", sourcepath)
cwdStack.append(dirname(sourcepath))
super(SourceFileLoader, self).exec_module(module)
cwdStack.pop()
sys.meta_path.insert(0, PathFinderImpl())
class ABException(BaseException):
pass
def error(message):
raise ABException(message)
class BracketedFormatter(string.Formatter):
def parse(self, format_string):
while format_string:
left, *right = format_string.split("$[", 1)
if not right:
yield (left, None, None, None)
break
right = right[0]
offset = len(right) + 1
try:
ast.parse(right)
except SyntaxError as e:
if not str(e).startswith("unmatched ']'"):
raise e
offset = e.offset
expr = right[0 : offset - 1]
format_string = right[offset:]
yield (left if left else None, expr, None, None)
def Rule(func):
sig = inspect.signature(func)
@functools.wraps(func)
def wrapper(*, name=None, replaces=None, **kwargs):
cwd = None
if "cwd" in kwargs:
cwd = kwargs["cwd"]
del kwargs["cwd"]
if not cwd:
if replaces:
cwd = replaces.cwd
else:
cwd = cwdStack[-1]
if name:
if name[0] != "+":
name = "+" + name
t = Target(cwd, join(cwd, name))
assert (
t.name not in targets
), f"target {t.name} has already been defined"
targets[t.name] = t
elif replaces:
t = replaces
else:
raise ABException("you must supply either 'name' or 'replaces'")
t.cwd = cwd
t.types = func.__annotations__
t.callback = func
t.traits.add(func.__name__)
if "args" in kwargs:
t.explicit_args = kwargs["args"]
t.args.update(t.explicit_args)
del kwargs["args"]
if "traits" in kwargs:
t.traits |= kwargs["traits"]
del kwargs["traits"]
t.binding = sig.bind(name=name, self=t, **kwargs)
t.binding.apply_defaults()
unmaterialisedTargets[t] = None
if replaces:
t.materialise(replacing=True)
return t
defaultGlobals[func.__name__] = wrapper
return wrapper
def _isiterable(xs):
return isinstance(xs, Iterable) and not isinstance(
xs, (str, bytes, bytearray)
)
class Target:
def __init__(self, cwd, name):
if verbose:
print("rule('%s', cwd='%s'" % (name, cwd))
self.name = name
self.localname = self.name.rsplit("+")[-1]
self.traits = set()
self.dir = join("$(OBJ)", name)
self.ins = []
self.outs = []
self.deps = []
self.materialised = False
self.args = {}
def __eq__(self, other):
return self.name is other.name
def __lt__(self, other):
return self.name < other.name
def __hash__(self):
return id(self)
def __repr__(self):
return f"Target('{self.name}')"
def templateexpand(selfi, s):
class Formatter(BracketedFormatter):
def get_field(self, name, a1, a2):
return (
eval(name, selfi.callback.__globals__, selfi.args),
False,
)
def format_field(self, value, format_spec):
if not value:
return ""
if type(value) == str:
return value
if _isiterable(value):
value = list(value)
if type(value) != list:
value = [value]
return " ".join(
[selfi.templateexpand(f) for f in filenamesof(value)]
)
return Formatter().format(s)
def materialise(self, replacing=False):
if self not in unmaterialisedTargets:
return
if not replacing and self in materialisingStack:
print("Found dependency cycle:")
for i in materialisingStack:
print(f" {i.name}")
print(f" {self.name}")
sys.exit(1)
materialisingStack.append(self)
# Perform type conversion to the declared rule parameter types.
try:
for k, v in self.binding.arguments.items():
if k != "kwargs":
t = self.types.get(k, None)
if t:
v = t.convert(v, self)
self.args[k] = copy(v)
else:
for kk, vv in v.items():
t = self.types.get(kk, None)
if t:
vv = t.convert(v, self)
self.args[kk] = copy(vv)
self.args["name"] = self.name
self.args["dir"] = self.dir
self.args["self"] = self
# Actually call the callback.
cwdStack.append(self.cwd)
if "kwargs" in self.binding.arguments.keys():
# If the caller wants kwargs, return all arguments except the standard ones.
cbargs = {
k: v for k, v in self.args.items() if k not in {"dir"}
}
else:
# Otherwise, just call the callback with the ones it asks for.
cbargs = {}
for k in self.binding.arguments.keys():
if k != "kwargs":
try:
cbargs[k] = self.args[k]
except KeyError:
error(
f"invocation of {self} failed because {k} isn't an argument"
)
self.callback(**cbargs)
cwdStack.pop()
except BaseException as e:
print(f"Error materialising {self}: {self.callback}")
print(f"Arguments: {self.args}")
raise e
if self.outs is None:
raise ABException(f"{self.name} didn't set self.outs")
if self in unmaterialisedTargets:
del unmaterialisedTargets[self]
materialisingStack.pop()
self.materialised = True
def convert(value, target):
if not value:
return None
return target.targetof(value)
def targetof(self, value):
if isinstance(value, str) and (value[0] == "="):
value = join(self.dir, value[1:])
return targetof(value, self.cwd)
def _filetarget(value, cwd):
if value in targets:
return targets[value]
t = Target(cwd, value)
t.outs = [value]
targets[value] = t
return t
def targetof(value, cwd=None):
if not cwd:
cwd = cwdStack[-1]
if isinstance(value, Path):
value = value.as_posix()
if isinstance(value, Target):
t = value
else:
assert (
value[0] != "="
), "can only use = for targets associated with another target"
if value.startswith("."):
# Check for local rule.
if value.startswith(".+"):
value = normpath(join(cwd, value[1:]))
# Check for local path.
elif value.startswith("./"):
value = normpath(join(cwd, value))
# Explicit directories are always raw files.
elif value.endswith("/"):
return _filetarget(value, cwd)
# Anything starting with a variable expansion is always a raw file.
elif value.startswith("$"):
return _filetarget(value, cwd)
# If this is not a rule lookup...
if "+" not in value:
# ...and if the value is pointing at a directory without a trailing /,
# it's a shorthand rule lookup.
if isdir(value):
value = value + "+" + basename(value)
# Otherwise it's an absolute file.
else:
return _filetarget(value, cwd)
# At this point we have the fully qualified name of a rule.
(path, target) = value.rsplit("+", 1)
value = join(path, "+" + target)
if value not in targets:
# Load the new build file.
path = join(path, "build.py")
try:
loadbuildfile(path)
except ModuleNotFoundError:
error(
f"no such build file '{path}' while trying to resolve '{value}'"
)
assert (
value in targets
), f"build file at '{path}' doesn't contain '+{target}' when trying to resolve '{value}'"
t = targets[value]
t.materialise()
return t
class Targets:
def convert(value, target):
if not value:
return []
assert _isiterable(value), "cannot convert non-list to Targets"
return [target.targetof(x) for x in flatten(value)]
class TargetsMap:
def convert(value, target):
if not value:
return {}
output = {k: target.targetof(v) for k, v in value.items()}
for k, v in output.items():
assert (
len(filenamesof([v])) == 1
), f"targets of a TargetsMap used as an argument of {target} with key '{k}' must contain precisely one output file, but was {filenamesof([v])}"
return output
def _removesuffix(self, suffix):
# suffix='' should not call self[:-0].
if suffix and self.endswith(suffix):
return self[: -len(suffix)]
else:
return self[:]
def loadbuildfile(filename):
modulename = _removesuffix(filename.replace("/", "."), ".py")
if modulename not in sys.modules:
spec = importlib.util.spec_from_file_location(
name=modulename,
location=filename,
loader=BuildFileLoaderImpl(fullname=modulename, path=filename),
submodule_search_locations=[],
)
module = importlib.util.module_from_spec(spec)
sys.modules[modulename] = module
spec.loader.exec_module(module)
def flatten(items):
def generate(xs):
for x in xs:
if _isiterable(x):
yield from generate(x)
else:
yield x
return list(generate(items))
def targetnamesof(items):
assert _isiterable(items), "argument of filenamesof is not a collection"
return [t.name for t in items]
def filenamesof(items):
assert _isiterable(items), "argument of filenamesof is not a collection"
def generate(xs):
for x in xs:
if isinstance(x, Target):
x.materialise()
yield from generate(x.outs)
else:
yield x
return list(generate(items))
def filenameof(x):
xs = filenamesof(x.outs)
assert (
len(xs) == 1
), f"tried to use filenameof() on {x} which does not have exactly one output: {x.outs}"
return xs[0]
def emit(*args, into=None):
s = " ".join(args) + "\n"
if into is not None:
into += [s]
else:
outputFp.write(s)
def emit_rule(self, ins, outs, cmds=[], label=None):
name = self.name
fins_list = filenamesof(ins)
fins = set(fins_list)
fouts = filenamesof(outs)
nonobjs = [f for f in fouts if not f.startswith("$(OBJ)")]
emit("")
if VERBOSE_MK_FILE:
for k, v in self.args.items():
emit(f"# {k} = {v}")
lines = []
if nonobjs:
emit("clean::", into=lines)
emit("\t$(hide) rm -f", *nonobjs, into=lines)
hashable = cmds + fins_list + fouts
hash = hashlib.sha1(bytes("\n".join(hashable), "utf-8")).hexdigest()
hashfile = join(self.dir, f"hash_{hash}")
global globalId
emit(".PHONY:", name, into=lines)
if outs:
outsn = globalId
globalId = globalId + 1
insn = globalId
globalId = globalId + 1
emit(f"OUTS_{outsn}", "=", *fouts, into=lines)
emit(f"INS_{insn}", "=", *fins, into=lines)
emit(name, ":", f"$(OUTS_{outsn})", into=lines)
emit(hashfile, ":", into=lines)
emit(f"\t@mkdir -p {self.dir}", into=lines)
emit(f"\t@touch {hashfile}", into=lines)
emit(
f"$(OUTS_{outsn})",
"&:" if len(fouts) > 1 else ":",
f"$(INS_{insn})",
hashfile,
into=lines,
)
if label:
emit("\t$(hide)", "$(ECHO) $(PROGRESSINFO)" + label, into=lines)
sandbox = join(self.dir, "sandbox")
emit("\t$(hide)", f"rm -rf {sandbox}", into=lines)
emit(
"\t$(hide)",
"$(PYTHON) build/_sandbox.py --link -s",
sandbox,
f"$(INS_{insn})",
into=lines,
)
for c in cmds:
emit(f"\t$(hide) cd {sandbox} && (", c, ")", into=lines)
emit(
"\t$(hide)",
"$(PYTHON) build/_sandbox.py --export -s",
sandbox,
f"$(OUTS_{outsn})",
into=lines,
)
else:
assert len(cmds) == 0, "rules with no outputs cannot have commands"
emit(name, ":", *fins, into=lines)
outputFp.write("".join(lines))
emit("")
@Rule
def simplerule(
self,
name,
ins: Targets = [],
outs: Targets = [],
deps: Targets = [],
commands=[],
label="RULE",
):
self.ins = ins
self.outs = outs
self.deps = deps
dirs = []
cs = []
for out in filenamesof(outs):
dir = dirname(out)
if dir and dir not in dirs:
dirs += [dir]
cs = [("mkdir -p %s" % dir) for dir in dirs]
for c in commands:
cs += [self.templateexpand(c)]
emit_rule(
self=self,
ins=ins + deps,
outs=outs,
label=self.templateexpand("$[label] $[name]") if label else None,
cmds=cs,
)
@Rule
def export(self, name=None, items: TargetsMap = {}, deps: Targets = []):
ins = []
outs = []
for dest, src in items.items():
dest = self.targetof(dest)
outs += [dest]
destf = filenameof(dest)
srcs = filenamesof([src])
assert (
len(srcs) == 1
), "a dependency of an exported file must have exactly one output file"
subrule = simplerule(
name=f"{self.localname}/{destf}",
cwd=self.cwd,
ins=[srcs[0]],
outs=[destf],
commands=["$(CP) -H %s %s" % (srcs[0], destf)],
label="",
)
subrule.materialise()
self.ins = []
self.outs = deps + outs
emit("")
emit(".PHONY:", name)
emit(name, ":", *filenamesof(outs + deps))
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-v", "--verbose", action="store_true")
parser.add_argument("-q", "--quiet", action="store_true")
parser.add_argument("-o", "--output")
parser.add_argument("files", nargs="+")
args = parser.parse_args()
global verbose
verbose = args.verbose
global quiet
quiet = args.quiet
global outputFp
outputFp = open(args.output, "wt")
for k in ["Rule"]:
defaultGlobals[k] = globals()[k]
global __name__
sys.modules["build.ab"] = sys.modules[__name__]
__name__ = "build.ab"
for f in args.files:
loadbuildfile(f)
while unmaterialisedTargets:
t = next(iter(unmaterialisedTargets))
t.materialise()
emit("AB_LOADED = 1\n")
main()

View File

@@ -1,251 +0,0 @@
local OBJDIR = "$(OBJDIR)"
local function objdir(e)
return concatpath(OBJDIR, e.cwd, e.name)
end
definerule("normalrule",
{
ins = { type="targets" },
deps = { type="targets", default={} },
outs = { type="targets", default={} },
outleaves = { type="strings" },
label = { type="string", optional=true },
objdir = { type="string", optional=true },
commands = { type="strings" },
},
function (e)
local dir = e.objdir or objdir(e)
local realouts = {}
for _, v in pairs(e.outleaves) do
realouts[#realouts+1] = concatpath(dir, v)
end
local vars = inherit(e.vars, {
dir = dir
})
local result = simplerule {
name = e.name,
ins = e.ins,
deps = e.deps,
outs = concat(realouts, filenamesof(e.outs)),
label = e.label,
commands = e.commands,
vars = vars,
}
result.dir = dir
return result
end
)
local function is_clike(f)
return f:find("%.c$") or f:find("%.cc$") or f:find("%.cpp$")
end
definerule("cfile",
{
srcs = { type="targets" },
deps = { type="targets", default={} }
},
function (e)
local cflags = e.vars.cflags
local cxxflags = e.vars.cxxflags
for _, target in ipairs(targetsof(e.deps)) do
if target.is.clibrary then
cflags = concat(cflags, target.dep_cflags)
cxxflags = concat(cxxflags, target.dep_cxxflags)
end
end
local src = filter(filenamesof(e.srcs), is_clike)
local cmd
local cxx = false
if src[1]:find("%.c$") then
cmd = "$(CC) -c -o %{outs[1]} %{ins[1]} %{hdrpaths} %{cflags}"
else
cmd = "$(CXX) -c -o %{outs[1]} %{ins[1]} %{hdrpaths} %{cflags} %{cxxflags}"
cxx = true
end
local outleaf = basename(e.name)..".o"
local rule = normalrule {
name = e.name,
cwd = e.cwd,
ins = e.srcs,
deps = e.deps,
outleaves = {outleaf},
label = e.label,
commands = cmd,
vars = {
hdrpaths = {},
cflags = cflags,
cxxflags = cxxflags,
}
}
rule.is.cxxfile = cxx
return rule
end
)
local function do_cfiles(e)
local outs = {}
local srcs = filenamesof(e.srcs)
for _, f in ipairs(sorted(filter(srcs, is_clike))) do
local ofile
if f:find(OBJDIR, 1, true) == 1 then
ofile = e.name.."/"..f:sub(#OBJDIR+1)..".o"
else
ofile = e.name.."/"..f..".o"
end
outs[#outs+1] = cfile {
name = ofile,
srcs = { f },
deps = e.deps
}
end
return outs
end
definerule("clibrary",
{
srcs = { type="targets", default={} },
deps = { type="targets", default={} },
hdrs = { type="targets", default={} },
dep_cflags = { type="strings", default={} },
dep_cxxflags = { type="strings", default={} },
dep_ldflags = { type="strings", default={} },
dep_libs = { type="strings", default={} },
},
function (e)
local ins = do_cfiles(e)
local cxx = false
for _, f in ipairs(ins) do
if f.is.cxxfile then
cxx = true
break
end
end
local mkdirs = {}
local copies = {}
local outs = {}
local function copy_file(src, dest)
mkdirs[#mkdirs+1] = "mkdir -p %{dir}/"..dirname(dest)
copies[#copies+1] = "cp "..src.." %{dir}/"..dest
outs[#outs+1] = objdir(e).."/"..dest
end
local deps = {}
for k, v in pairs(e.hdrs) do
deps[#deps+1] = v
if type(k) == "number" then
v = filenamesof(v)
for _, v in ipairs(v) do
if not startswith(e.cwd, v) then
error(string.format("filename '%s' is not local to '%s' --- "..
"you'll have to specify the output filename manually", v, e.cwd))
end
copy_file(v, v:gsub("^"..e.cwd, ""))
end
else
v = filenamesof(v)
if #v ~= 1 then
error("each mapped hdrs item can only cope with a single file")
end
copy_file(v[1], k)
end
end
ins = sorted(filenamesof(ins))
local has_ar = (#ins ~= 0)
local lib = normalrule {
name = e.name,
cwd = e.cwd,
ins = sorted(filenamesof(ins)),
deps = deps,
outs = outs,
outleaves = { e.name..".a" },
label = e.label,
commands = {
sorted(mkdirs),
sorted(copies),
has_ar and "rm -f %{outs[1]} && $(AR) cqs %{outs[1]} %{ins}" or {},
}
}
lib.dep_cflags = concat(e.dep_cflags, "-I"..lib.dir)
lib.dep_cxxflags = e.dep_cxxflags
lib.dep_ldflags = e.dep_ldflags
lib.dep_libs = concat(e.dep_libs, has_ar and matching(filenamesof(lib), "%.a$") or {})
lib.dep_cxx = cxx
for _, d in pairs(targetsof(e.deps)) do
lib.dep_cflags = concat(lib.dep_cflags, d.dep_cflags)
lib.dep_cxxflags = concat(lib.dep_cxxflags, d.dep_cxxflags)
lib.dep_ldflags = concat(lib.dep_ldflags, d.dep_ldflags)
lib.dep_libs = concat(lib.dep_libs, d.dep_libs)
lib.dep_cxx = lib.dep_cxx or d.dep_cxx
end
return lib
end
)
definerule("cprogram",
{
srcs = { type="targets", default={} },
deps = { type="targets", default={} },
},
function (e)
local deps = e.deps
local ins = {}
local cxx = false
if (#e.srcs > 0) then
local objs = do_cfiles(e)
for _, obj in pairs(objs) do
if obj.is.cxxfile then
cxx = true
end
ins[#ins+1] = obj
end
end
local libs = {}
local cflags = {}
local cxxflags = {}
local ldflags = {}
for _, lib in pairs(e.deps) do
cflags = concat(cflags, lib.dep_cflags)
cxxflags = concat(cxxflags, lib.dep_cxxflags)
ldflags = concat(ldflags, lib.dep_ldflags)
libs = concat(libs, lib.dep_libs)
cxx = cxx or lib.dep_cxx
end
local command
if cxx then
command = "$(CXX) $(LDFLAGS) %{ldflags} -o %{outs[1]} %{ins} %{libs} %{libs}"
else
command = "$(CC) $(LDFLAGS) %{ldflags} -o %{outs[1]} %{ins} %{libs} %{libs}"
end
return normalrule {
name = e.name,
cwd = e.cwd,
deps = deps,
ins = ins,
outleaves = { e.name },
commands = { command },
vars = {
cflags = cflags,
cxxflags = cxxflags,
ldflags = ldflags,
libs = libs,
}
}
end
)

597
build/c.py Normal file

@@ -0,0 +1,597 @@
from build.ab import (
Rule,
Targets,
TargetsMap,
filenameof,
filenamesof,
flatten,
simplerule,
emit,
)
from build.utils import filenamesmatchingof, stripext, collectattrs
from build.toolchain import Toolchain, HostToolchain
from os.path import *
emit(
"""
ifeq ($(OSX),no)
STARTGROUP ?= -Wl,--start-group
ENDGROUP ?= -Wl,--end-group
endif
"""
)
Toolchain.CC = ["$(CC) -c -o $[outs[0]] $[ins[0]] $(CFLAGS) $[cflags]"]
Toolchain.CPP = ["$(CC) -E -P -o $[outs] $[cflags] -x c $[ins]"]
Toolchain.CXX = ["$(CXX) -c -o $[outs[0]] $[ins[0]] $(CFLAGS) $[cflags]"]
Toolchain.AR = ["$(AR) cqs $[outs[0]] $[ins]"]
Toolchain.ARXX = ["$(AR) cqs $[outs[0]] $[ins]"]
Toolchain.CLINK = [
"$(CC) -o $[outs[0]] $(STARTGROUP) $[ins] $[ldflags] $(LDFLAGS) $(ENDGROUP)"
]
Toolchain.CXXLINK = [
"$(CXX) -o $[outs[0]] $(STARTGROUP) $[ins] $[ldflags] $(LDFLAGS) $(ENDGROUP)"
]
Toolchain.is_source_file = (
lambda f: f.endswith(".c")
or f.endswith(".cc")
or f.endswith(".cpp")
or f.endswith(".S")
or f.endswith(".s")
or f.endswith(".m")
or f.endswith(".mm")
)
# Given a set of dependencies, finds the set of relevant library targets (i.e.
# those contributing *.a files) for compiling C programs. The actual list of
# libraries is in dep.clibrary_files.
def _toolchain_find_library_targets(deps):
lib_deps = []
for d in deps:
lib_deps = _combine(lib_deps, d.args.get("clibrary_deps", []))
return lib_deps
Toolchain.find_c_library_targets = _toolchain_find_library_targets
# Given a set of dependencies, finds the set of relevant header targets (i.e.
# those contributing *.h files) for compiling C programs. The actual list of
# headers is in dep.cheader_files.
def _toolchain_find_header_targets(deps, initial=[]):
hdr_deps = initial
for d in deps:
hdr_deps = _combine(hdr_deps, d.args.get("cheader_deps", []))
return hdr_deps
Toolchain.find_c_header_targets = _toolchain_find_header_targets
HostToolchain.CC = [
"$(HOSTCC) -c -o $[outs[0]] $[ins[0]] $(HOSTCFLAGS) $[cflags]"
]
HostToolchain.CPP = ["$(HOSTCC) -E -P -o $[outs] $[cflags] -x c $[ins]"]
HostToolchain.CXX = [
"$(HOSTCXX) -c -o $[outs[0]] $[ins[0]] $(HOSTCFLAGS) $[cflags]"
]
HostToolchain.AR = ["$(HOSTAR) cqs $[outs[0]] $[ins]"]
HostToolchain.ARXX = ["$(HOSTAR) cqs $[outs[0]] $[ins]"]
HostToolchain.CLINK = [
"$(HOSTCC) -o $[outs[0]] $(STARTGROUP) $[ins] $[ldflags] $(HOSTLDFLAGS) $(ENDGROUP)"
]
HostToolchain.CXXLINK = [
"$(HOSTCXX) -o $[outs[0]] $(STARTGROUP) $[ins] $[ldflags] $(HOSTLDFLAGS) $(ENDGROUP)"
]
def _combine(list1, list2):
r = list(list1)
for i in list2:
if i not in r:
r.append(i)
return r
def _indirect(deps, name):
r = []
for d in deps:
r = _combine(r, d.args.get(name, [d]))
return r
def cfileimpl(
self, name, srcs, deps, suffix, commands, label, toolchain, cflags
):
outleaf = "=" + stripext(basename(filenameof(srcs[0]))) + suffix
hdr_deps = toolchain.find_c_header_targets(deps)
other_deps = [
d
for d in deps
if ("cheader_deps" not in d.args) and ("clibrary_deps" not in d.args)
]
hdr_files = collectattrs(targets=hdr_deps, name="cheader_files")
cflags = collectattrs(
targets=hdr_deps, name="caller_cflags", initial=cflags
)
t = simplerule(
replaces=self,
ins=srcs,
deps=other_deps + hdr_files,
outs=[outleaf],
label=label,
commands=commands,
args={"cflags": cflags},
)
@Rule
def cfile(
self,
name,
srcs: Targets = None,
deps: Targets = None,
cflags=[],
suffix=".o",
toolchain=Toolchain,
label="CC",
):
cfileimpl(
self,
name,
srcs,
deps,
suffix,
toolchain.CC,
toolchain.PREFIX + label,
toolchain,
cflags,
)
@Rule
def cxxfile(
self,
name,
srcs: Targets = None,
deps: Targets = None,
cflags=[],
suffix=".o",
toolchain=Toolchain,
label="CXX",
):
cfileimpl(
self,
name,
srcs,
deps,
suffix,
toolchain.CXX,
toolchain.PREFIX + label,
toolchain,
cflags,
)
def _removeprefix(self, prefix):
if self.startswith(prefix):
return self[len(prefix) :]
else:
return self[:]
def findsources(self, srcs, deps, cflags, filerule, toolchain, cwd):
for f in filenamesof(srcs):
if not toolchain.is_source_file(f):
cflags = cflags + [f"-I{dirname(f)}"]
deps = deps + [f]
objs = []
for s in flatten(srcs):
objs += [
filerule(
name=join(self.localname, _removeprefix(f, "$(OBJ)/")),
srcs=[f],
deps=deps,
cflags=sorted(set(cflags)),
toolchain=toolchain,
cwd=cwd,
args=getattr(self, "explicit_args", {}),
)
for f in filenamesof([s])
if toolchain.is_source_file(f)
]
if any(f.endswith(".o") for f in filenamesof([s])):
objs += [s]
return objs
def libraryimpl(
self,
name,
srcs,
deps,
hdrs,
caller_cflags,
caller_ldflags,
cflags,
ldflags,
toolchain,
commands,
label,
filerule,
):
hdr_deps = toolchain.find_c_header_targets(deps) + [self]
lib_deps = toolchain.find_c_library_targets(deps) + [self]
hr = None
hf = []
ar = None
if hdrs:
cs = []
ins = hdrs.values()
outs = []
i = 0
for dest, src in hdrs.items():
s = filenamesof([src])
assert (
len(s) == 1
), "the target of a header must return exactly one file"
cs += [f"$(CP) $[ins[{i}]] $[outs[{i}]]"]
outs += ["=" + dest]
i = i + 1
hr = simplerule(
name=f"{self.localname}_hdr",
ins=ins,
outs=outs,
commands=cs,
label=toolchain.PREFIX + "CHEADERS",
)
hr.args["cheader_deps"] = [hr]
hr.args["cheader_files"] = [hr]
hf = [f"-I{hr.dir}"]
if srcs:
# Can't depend on the current target to get the library headers, because
# if we do it'll cause a dependency loop.
objs = findsources(
self,
srcs,
deps + ([hr] if hr else []),
cflags + hf,
filerule,
toolchain,
self.cwd,
)
ar = simplerule(
name=f"{self.localname}_lib",
ins=objs,
outs=[f"={self.localname}.a"],
deps=deps,
label=label,
commands=commands,
)
ar.materialise()
self.outs = ([hr] if hr else []) + ([ar] if ar else [])
self.deps = self.outs
self.args["cheader_deps"] = hdr_deps
self.args["clibrary_deps"] = lib_deps
self.args["cheader_files"] = [hr] if hr else []
self.args["clibrary_files"] = [ar] if ar else []
self.args["caller_cflags"] = caller_cflags + hf
self.args["caller_ldflags"] = caller_ldflags
@Rule
def clibrary(
self,
name,
srcs: Targets = None,
deps: Targets = None,
hdrs: TargetsMap = None,
caller_cflags=[],
caller_ldflags=[],
cflags=[],
ldflags=[],
toolchain=Toolchain,
label="LIB",
cfilerule=cfile,
):
libraryimpl(
self,
name,
srcs,
deps,
hdrs,
caller_cflags,
caller_ldflags,
cflags,
ldflags,
toolchain,
toolchain.AR,
toolchain.PREFIX + label,
cfilerule,
)
@Rule
def hostclibrary(
self,
name,
srcs: Targets = None,
deps: Targets = None,
hdrs: TargetsMap = None,
caller_cflags=[],
caller_ldflags=[],
cflags=[],
ldflags=[],
toolchain=HostToolchain,
label="LIB",
cfilerule=cfile,
):
libraryimpl(
self,
name,
srcs,
deps,
hdrs,
caller_cflags,
caller_ldflags,
cflags,
ldflags,
toolchain,
toolchain.AR,
toolchain.PREFIX + label,
cfilerule,
)
@Rule
def cxxlibrary(
self,
name,
srcs: Targets = None,
deps: Targets = None,
hdrs: TargetsMap = None,
caller_cflags=[],
caller_ldflags=[],
cflags=[],
ldflags=[],
toolchain=Toolchain,
label="CXXLIB",
cxxfilerule=cxxfile,
):
libraryimpl(
self,
name,
srcs,
deps,
hdrs,
caller_cflags,
caller_ldflags,
cflags,
ldflags,
toolchain,
toolchain.ARXX,
toolchain.PREFIX + label,
cxxfilerule,
)
@Rule
def hostcxxlibrary(
self,
name,
srcs: Targets = None,
deps: Targets = None,
hdrs: TargetsMap = None,
caller_cflags=[],
caller_ldflags=[],
cflags=[],
ldflags=[],
toolchain=HostToolchain,
label="CXXLIB",
cxxfilerule=cxxfile,
):
libraryimpl(
self,
name,
srcs,
deps,
hdrs,
caller_cflags,
caller_ldflags,
cflags,
ldflags,
toolchain,
toolchain.ARXX,
toolchain.PREFIX + label,
cxxfilerule,
)
def programimpl(
self,
name,
srcs,
deps,
cflags,
ldflags,
toolchain,
commands,
label,
filerule,
):
cfiles = findsources(
self, srcs, deps, cflags, filerule, toolchain, self.cwd
)
lib_deps = toolchain.find_c_library_targets(deps)
libs = collectattrs(targets=lib_deps, name="clibrary_files")
ldflags = collectattrs(
targets=lib_deps, name="caller_ldflags", initial=ldflags
)
simplerule(
replaces=self,
ins=cfiles + libs,
outs=[f"={self.localname}{toolchain.EXE}"],
deps=deps,
label=label,
commands=commands,
args={"ldflags": ldflags},
)
@Rule
def cprogram(
self,
name,
srcs: Targets = None,
deps: Targets = None,
cflags=[],
ldflags=[],
toolchain=Toolchain,
label="CLINK",
cfilerule=cfile,
):
programimpl(
self,
name,
srcs,
deps,
cflags,
ldflags,
toolchain,
toolchain.CLINK,
toolchain.PREFIX + label,
cfilerule,
)
@Rule
def hostcprogram(
self,
name,
srcs: Targets = None,
deps: Targets = None,
cflags=[],
ldflags=[],
toolchain=HostToolchain,
label="CLINK",
cfilerule=cfile,
):
programimpl(
self,
name,
srcs,
deps,
cflags,
ldflags,
toolchain,
toolchain.CLINK,
toolchain.PREFIX + label,
cfilerule,
)
@Rule
def cxxprogram(
self,
name,
srcs: Targets = None,
deps: Targets = None,
cflags=[],
ldflags=[],
toolchain=Toolchain,
label="CXXLINK",
cxxfilerule=cxxfile,
):
programimpl(
self,
name,
srcs,
deps,
cflags,
ldflags,
toolchain,
toolchain.CXXLINK,
toolchain.PREFIX + label,
cxxfilerule,
)
@Rule
def hostcxxprogram(
self,
name,
srcs: Targets = None,
deps: Targets = None,
cflags=[],
ldflags=[],
toolchain=HostToolchain,
label="CXXLINK",
cxxfilerule=cxxfile,
):
programimpl(
self,
name,
srcs,
deps,
cflags,
ldflags,
toolchain,
toolchain.CXXLINK,
toolchain.PREFIX + label,
cxxfilerule,
)
def _cppfileimpl(self, name, srcs, deps, cflags, toolchain):
hdr_deps = _indirect(deps, "cheader_deps")
cflags = collectattrs(
targets=hdr_deps, name="caller_cflags", initial=cflags
)
simplerule(
replaces=self,
ins=srcs,
outs=[f"={self.localname}"],
deps=deps,
commands=toolchain.CPP,
args={"cflags": cflags},
label=toolchain.PREFIX + "CPPFILE",
)
@Rule
def cppfile(
self,
name,
srcs: Targets = [],
deps: Targets = [],
cflags=[],
toolchain=Toolchain,
):
_cppfileimpl(self, name, srcs, deps, cflags, toolchain)
@Rule
def hostcppfile(
self,
name,
srcs: Targets = [],
deps: Targets = [],
cflags=[],
toolchain=HostToolchain,
):
_cppfileimpl(self, name, srcs, deps, cflags, toolchain)
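
build/c.py defines the C/C++ rules (cfile, clibrary, cprogram and their host/C++ variants) on top of simplerule, replacing the deleted Lua rules above. A minimal usage sketch, with hypothetical target and file names:

# Hypothetical build.py fragment; mylib and myprogram are not real targets.
from build.c import clibrary, cprogram

mylib = clibrary(
    name="mylib",
    srcs=["./mylib.c"],
    hdrs={"mylib.h": "./mylib.h"},
)

cprogram(
    name="myprogram",
    srcs=["./main.c"],
    deps=[mylib],
)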

85
build/pkg.py Normal file

@@ -0,0 +1,85 @@
from build.ab import Rule, Target
import os
import subprocess
class _PkgConfig:
package_present = set()
package_properties = {}
pkgconfig = None
def __init__(self, cmd):
assert cmd, "no pkg-config environment variable supplied"
self.pkgconfig = cmd
r = subprocess.run(f"{cmd} --list-all", shell=True, capture_output=True)
ps = r.stdout.decode("utf-8")
self.package_present = {l.split(" ", 1)[0] for l in ps.splitlines()}
def has_package(self, name):
return name in self.package_present
def get_property(self, name, flag):
p = f"{name}.{flag}"
if p not in self.package_properties:
r = subprocess.run(
f"{self.pkgconfig} {flag} {name}",
shell=True,
capture_output=True,
)
self.package_properties[p] = r.stdout.decode("utf-8").strip()
return self.package_properties[p]
TargetPkgConfig = _PkgConfig(os.getenv("PKG_CONFIG"))
HostPkgConfig = _PkgConfig(os.getenv("HOST_PKG_CONFIG"))
def _package(self, name, package, fallback, pkgconfig):
if pkgconfig.has_package(package):
cflags = pkgconfig.get_property(package, "--cflags")
ldflags = pkgconfig.get_property(package, "--libs")
if cflags:
self.args["caller_cflags"] = [cflags]
if ldflags:
self.args["caller_ldflags"] = [ldflags]
self.args["clibrary_deps"] = [self]
self.args["cheader_deps"] = [self]
self.traits.update({"clibrary", "cxxlibrary"})
return
assert (
fallback
), f"Required package '{package}' not installed when materialising target '$[name]'"
if "cheader_deps" in fallback.args:
self.args["cheader_deps"] = fallback.args["cheader_deps"]
if "clibrary_deps" in fallback.args:
self.args["clibrary_deps"] = fallback.args["clibrary_deps"]
if "cheader_files" in fallback.args:
self.args["cheader_files"] = fallback.args["cheader_files"]
if "clibrary_files" in fallback.args:
self.args["clibrary_files"] = fallback.args["clibrary_files"]
self.ins = fallback.ins
self.outs = fallback.outs
self.deps = fallback.deps
self.traits = fallback.traits
@Rule
def package(self, name, package=None, fallback: Target = None):
_package(self, name, package, fallback, TargetPkgConfig)
@Rule
def hostpackage(self, name, package=None, fallback: Target = None):
_package(self, name, package, fallback, HostPkgConfig)
def has_package(name):
return TargetPkgConfig.has_package(name)
def has_host_package(name):
return HostPkgConfig.has_package(name)
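
build/pkg.py wraps pkg-config: a package target contributes caller_cflags/caller_ldflags when the package is installed, and otherwise substitutes the headers and libraries of the fallback target, if one is given. A minimal sketch, assuming zlib is queried via pkg-config (the target name is hypothetical):

# Hypothetical build.py fragment.
from build.pkg import package

package(
    name="libz",
    package="zlib",
)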

184
build/protobuf.py Normal file

@@ -0,0 +1,184 @@
from build.ab import Rule, Targets, emit, simplerule, filenamesof
from build.utils import filenamesmatchingof, collectattrs
from os.path import join, abspath, dirname, relpath
from build.pkg import has_package
emit(
"""
PROTOC ?= protoc
HOSTPROTOC ?= protoc
"""
)
assert has_package("protobuf"), "required package 'protobuf' not installed"
def _getprotodeps(deps):
r = set()
for d in deps:
r.update(d.args.get("protodeps", {d}))
return sorted(r)
@Rule
def proto(self, name, srcs: Targets = [], deps: Targets = []):
protodeps = _getprotodeps(deps)
descriptorlist = ":".join(
[
relpath(f, start=self.dir)
for f in filenamesmatchingof(protodeps, "*.descriptor")
]
)
dirs = sorted({"$[dir]/" + dirname(f) for f in filenamesof(srcs)})
simplerule(
replaces=self,
ins=srcs,
outs=[f"={self.localname}.descriptor"],
deps=protodeps,
commands=(
["mkdir -p " + (" ".join(dirs))]
+ [f"$(CP) {f} $[dir]/{f}" for f in filenamesof(srcs)]
+ [
"cd $[dir] && "
+ (
" ".join(
[
"$(PROTOC)",
"--proto_path=.",
"--include_source_info",
f"--descriptor_set_out={self.localname}.descriptor",
]
+ (
[f"--descriptor_set_in={descriptorlist}"]
if descriptorlist
else []
)
+ ["$[ins]"]
)
)
]
),
label="PROTO",
args={
"protosrcs": filenamesof(srcs),
"protodeps": set(protodeps) | {self},
},
)
@Rule
def protolib(self, name, srcs: Targets = []):
simplerule(
replaces=self,
label="PROTOLIB",
args={
"protosrcs": collectattrs(targets=srcs, name="protosrcs"),
"protodeps": set(_getprotodeps(srcs)),
},
)
@Rule
def protocc(self, name, srcs: Targets = [], deps: Targets = []):
outs = []
protos = []
allsrcs = collectattrs(targets=srcs, name="protosrcs")
assert allsrcs, "no sources provided"
for f in filenamesmatchingof(allsrcs, "*.proto"):
cc = f.replace(".proto", ".pb.cc")
h = f.replace(".proto", ".pb.h")
protos += [f]
outs += ["=" + cc, "=" + h]
protodeps = _getprotodeps(deps + srcs)
descriptorlist = ":".join(
[
relpath(f, start=self.dir)
for f in filenamesmatchingof(protodeps, "*.descriptor")
]
)
r = simplerule(
name=f"{self.localname}_srcs",
cwd=self.cwd,
ins=srcs,
outs=outs,
deps=protodeps,
commands=[
"cd $[dir] && "
+ (
" ".join(
[
"$(PROTOC)",
"--proto_path=.",
"--cpp_out=.",
f"--descriptor_set_in={descriptorlist}",
]
+ protos
)
)
],
label="PROTOCC",
)
headers = {f[1:]: join(r.dir, f[1:]) for f in outs if f.endswith(".pb.h")}
from build.c import cxxlibrary
cxxlibrary(
replaces=self,
srcs=[r],
deps=deps,
hdrs=headers,
)
@Rule
def protojava(self, name, srcs: Targets = [], deps: Targets = []):
outs = []
allsrcs = collectattrs(targets=srcs, name="protosrcs")
assert allsrcs, "no sources provided"
protos = []
for f in filenamesmatchingof(allsrcs, "*.proto"):
protos += [f]
srcs += [f]
descriptorlist = ":".join(
[abspath(f) for f in filenamesmatchingof(srcs + deps, "*.descriptor")]
)
r = simplerule(
name=f"{self.localname}_srcs",
cwd=self.cwd,
ins=protos,
outs=[f"={self.localname}.srcjar"],
deps=srcs + deps,
commands=[
"mkdir -p $[dir]/srcs",
"cd $[dir]/srcs && "
+ (
" ".join(
[
"$(PROTOC)",
"--proto_path=.",
"--java_out=.",
f"--descriptor_set_in={descriptorlist}",
]
+ protos
)
),
"$(JAR) cf $[outs[0]] -C $[dir]/srcs .",
],
traits={"srcjar"},
label="PROTOJAVA",
)
from build.java import javalibrary
javalibrary(
replaces=self,
deps=[r] + deps,
)
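
build/protobuf.py drives protoc: proto builds a descriptor set from .proto sources, and protocc/protojava turn those descriptors into a cxxlibrary or javalibrary. A minimal C++ sketch with hypothetical names:

# Hypothetical build.py fragment.
from build.protobuf import proto, protocc

config_proto = proto(
    name="config_proto",
    srcs=["./config.proto"],
)

protocc(
    name="config_proto_cc",
    srcs=[config_proto],
)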


@@ -1,18 +0,0 @@
definerule("test",
{
srcs = { type="targets", default={} },
},
function (e)
if vars.TESTS == "yes" then
normalrule {
name = e.name,
ins = e.srcs,
outleaves = { "log.txt" },
commands = {
"%{ins} > %{outs}",
}
}
end
end
)

11
build/toolchain.py Normal file

@@ -0,0 +1,11 @@
import platform
_is_windows = (platform.system() == "Windows")
class Toolchain:
PREFIX = ""
EXE = ".exe" if _is_windows else ""
class HostToolchain(Toolchain):
PREFIX = "HOST"
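
build/toolchain.py only carries the toolchain name prefix and executable suffix; the actual command templates are attached in build/c.py. Rules pick a toolchain via their toolchain= parameter, so building with the host compiler looks like this sketch (hypothetical target name, effectively the same as using hostcprogram):

# Hypothetical build.py fragment.
from build.toolchain import HostToolchain
from build.c import cprogram

cprogram(
    name="mkcodegen",
    srcs=["./mkcodegen.c"],
    toolchain=HostToolchain,
)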

92
build/utils.py Normal file

@@ -0,0 +1,92 @@
from build.ab import (
Rule,
Target,
Targets,
filenameof,
filenamesof,
cwdStack,
error,
simplerule,
)
from os.path import relpath, splitext, join, basename, isfile
from glob import iglob
import fnmatch
def filenamesmatchingof(xs, pattern):
return fnmatch.filter(filenamesof(xs), pattern)
def stripext(path):
return splitext(path)[0]
def targetswithtraitsof(xs, trait):
return [t for t in xs if trait in t.traits]
def collectattrs(*, targets, name, initial=[]):
s = set(initial)
for a in [t.args.get(name, []) for t in targets]:
s.update(a)
return sorted(s)
def itemsof(pattern, root=None, cwd=None):
if not cwd:
cwd = cwdStack[-1]
if not root:
root = "."
pattern = join(cwd, pattern)
root = join(cwd, root)
result = {}
for f in iglob(pattern, recursive=True):
try:
if isfile(f):
result[relpath(f, root)] = f
except ValueError:
error(f"file '{f}' is not in root '{root}'")
return result
@Rule
def objectify(self, name, src: Target, symbol):
simplerule(
replaces=self,
ins=["build/_objectify.py", src],
outs=[f"={basename(filenameof(src))}.h"],
commands=["$(PYTHON) $[ins[0]] $[ins[1]] " + symbol + " > $[outs]"],
label="OBJECTIFY",
)
@Rule
def test(
self,
name,
command: Target = None,
commands=None,
ins: Targets = None,
deps: Targets = None,
label="TEST",
):
if command:
simplerule(
replaces=self,
ins=[command],
outs=["=sentinel"],
commands=["$[ins[0]]", "touch $[outs[0]]"],
deps=deps,
label=label,
)
else:
simplerule(
replaces=self,
ins=ins,
outs=["=sentinel"],
commands=commands + ["touch $[outs[0]]"],
deps=deps,
label=label,
)
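
build/utils.py collects small helpers: glob and attribute utilities, objectify for embedding a file as a C header via build/_objectify.py, and a test rule that runs a command and touches a sentinel file on success. A minimal sketch with hypothetical names:

# Hypothetical build.py fragment.
from build.c import cprogram
from build.utils import test

mytest = cprogram(
    name="mytest",
    srcs=["./mytest.c"],
)

test(
    name="mytest_run",
    command=mytest,
)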

27
build/zip.py Normal file

@@ -0,0 +1,27 @@
from build.ab import (
Rule,
simplerule,
TargetsMap,
filenameof,
)
@Rule
def zip(
self, name, flags="", items: TargetsMap = {}, extension="zip", label="ZIP"
):
cs = ["$(PYTHON) build/_zip.py -z $[outs]"]
ins = []
for k, v in items.items():
cs += [f"-f {k} {filenameof(v)}"]
ins += [v]
simplerule(
replaces=self,
ins=ins,
deps=["build/_zip.py"],
outs=[f"={self.localname}." + extension],
commands=[" ".join(cs)],
label=label,
)
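
build/zip.py assembles archives via the build/_zip.py helper; the items map gives each entry's path inside the archive and the target providing it. A minimal sketch with hypothetical file names:

# Hypothetical build.py fragment.
from build.zip import zip

zip(
    name="docs_zip",
    items={
        "README.md": "./README.md",
        "COPYING": "./COPYING",
    },
)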

11
config.py Normal file

@@ -0,0 +1,11 @@
import platform
import os
if os.getenv("BUILDTYPE") == "windows":
windows = True
osx = False
unix = False
else:
windows = False
osx = platform.system() == "Darwin"
unix = True


@@ -2,33 +2,34 @@
#define ADF_NATIV_H
#ifdef __cplusplus
extern "C" {
extern "C"
{
#endif
#include "adf_str.h"
#define NATIVE_FILE 8001
struct nativeDevice
{
FILE* fd;
};
struct nativeDevice
{
FILE* fd;
};
struct nativeFunctions
{
/* called by adfMount() */
RETCODE (*adfInitDevice)(struct Device*, char*, BOOL);
/* called by adfReadBlock() */
RETCODE (*adfNativeReadSector)(struct Device*, int32_t, int, uint8_t*);
/* called by adfWriteBlock() */
RETCODE (*adfNativeWriteSector)(struct Device*, int32_t, int, uint8_t*);
/* called by adfMount() */
BOOL (*adfIsDevNative)(char*);
/* called by adfUnMount() */
RETCODE (*adfReleaseDevice)(struct Device*);
};
struct nativeFunctions
{
/* called by adfMount() */
RETCODE (*adfInitDevice)(struct Device*, char*, BOOL);
/* called by adfReadBlock() */
RETCODE (*adfNativeReadSector)(struct Device*, int32_t, int, uint8_t*);
/* called by adfWriteBlock() */
RETCODE (*adfNativeWriteSector)(struct Device*, int32_t, int, uint8_t*);
/* called by adfMount() */
BOOL (*adfIsDevNative)(char*);
/* called by adfUnMount() */
RETCODE (*adfReleaseDevice)(struct Device*);
};
extern void adfInitNativeFct();
extern void adfInitNativeFct();
#ifdef __cplusplus
}


@@ -1,22 +0,0 @@
ADFLIB_SRCS = \
dep/adflib/src/adf_bitm.c \
dep/adflib/src/adf_cache.c \
dep/adflib/src/adf_dir.c \
dep/adflib/src/adf_disk.c \
dep/adflib/src/adf_dump.c \
dep/adflib/src/adf_env.c \
dep/adflib/src/adf_file.c \
dep/adflib/src/adf_hd.c \
dep/adflib/src/adf_link.c \
dep/adflib/src/adf_raw.c \
dep/adflib/src/adf_salv.c \
dep/adflib/src/adf_util.c \
ADFLIB_OBJS = $(patsubst %.c, $(OBJDIR)/%.o, $(ADFLIB_SRCS))
$(ADFLIB_OBJS): CFLAGS += -Idep/adflib/src -Idep/adflib
ADFLIB_LIB = $(OBJDIR)/libadflib.a
$(ADFLIB_LIB): $(ADFLIB_OBJS)
ADFLIB_CFLAGS = -Idep/adflib/src
ADFLIB_LDFLAGS = $(ADFLIB_LIB)
OBJS += $(ADFLIB_OBJS)

47
dep/adflib/build.py Normal file

@@ -0,0 +1,47 @@
from build.c import clibrary
clibrary(
name="adflib",
srcs=[
"./src/adf_bitm.c",
"./src/adf_bitm.h",
"./src/adf_cache.c",
"./src/adf_cache.h",
"./src/adf_dir.c",
"./src/adf_dir.h",
"./src/adf_disk.c",
"./src/adf_disk.h",
"./src/adf_dump.c",
"./src/adf_dump.h",
"./src/adf_env.c",
"./src/adf_env.h",
"./src/adf_file.c",
"./src/adf_file.h",
"./src/adf_hd.c",
"./src/adf_hd.h",
"./src/adf_link.c",
"./src/adf_link.h",
"./src/adf_raw.c",
"./src/adf_raw.h",
"./src/adf_salv.c",
"./src/adf_salv.h",
"./src/adf_str.h",
"./src/adf_util.c",
"./src/adf_util.h",
"./src/defendian.h",
"./src/hd_blk.h",
"./src/prefix.h",
"./adf_nativ.h",
"./config.h",
"./src/adflib.h",
],
cflags=["-Idep/adflib", "-Idep/adflib/src"],
hdrs={
"adf_blk.h": "./src/adf_blk.h",
"adf_defs.h": "./src/adf_defs.h",
"adf_err.h": "./src/adf_err.h",
"adf_nativ.h": "./adf_nativ.h",
"adf_str.h": "./src/adf_str.h",
"adflib.h": "./src/adflib.h",
},
)


@@ -1,2 +1 @@
/* empty config.h to keep the source happy */


@@ -27,25 +27,28 @@
*
*/
#include"adf_str.h"
#include"prefix.h"
#include "adf_str.h"
#include "prefix.h"
RETCODE adfReadBitmapBlock(struct Volume*, SECTNUM nSect, struct bBitmapBlock*);
RETCODE adfWriteBitmapBlock(struct Volume*, SECTNUM nSect, struct bBitmapBlock*);
RETCODE adfReadBitmapExtBlock(struct Volume*, SECTNUM nSect, struct bBitmapExtBlock*);
RETCODE adfWriteBitmapExtBlock(struct Volume*, SECTNUM, struct bBitmapExtBlock* );
RETCODE adfWriteBitmapBlock(
struct Volume*, SECTNUM nSect, struct bBitmapBlock*);
RETCODE adfReadBitmapExtBlock(
struct Volume*, SECTNUM nSect, struct bBitmapExtBlock*);
RETCODE adfWriteBitmapExtBlock(
struct Volume*, SECTNUM, struct bBitmapExtBlock*);
SECTNUM adfGet1FreeBlock(struct Volume *vol);
RETCODE adfUpdateBitmap(struct Volume *vol);
SECTNUM adfGet1FreeBlock(struct Volume* vol);
RETCODE adfUpdateBitmap(struct Volume* vol);
PREFIX int32_t adfCountFreeBlocks(struct Volume* vol);
RETCODE adfReadBitmap(struct Volume* , SECTNUM nBlock, struct bRootBlock* root);
RETCODE adfReadBitmap(struct Volume*, SECTNUM nBlock, struct bRootBlock* root);
BOOL adfIsBlockFree(struct Volume* vol, SECTNUM nSect);
void adfSetBlockFree(struct Volume* vol, SECTNUM nSect);
void adfSetBlockUsed(struct Volume* vol, SECTNUM nSect);
BOOL adfGetFreeBlocks(struct Volume* vol, int nbSect, SECTNUM* sectList);
RETCODE adfCreateBitmap(struct Volume *vol);
RETCODE adfWriteNewBitmap(struct Volume *vol);
void adfFreeBitmap(struct Volume *vol);
RETCODE adfCreateBitmap(struct Volume* vol);
RETCODE adfWriteNewBitmap(struct Volume* vol);
void adfFreeBitmap(struct Volume* vol);
#endif /* ADF_BITM_H */


@@ -25,264 +25,255 @@
*
*/
#ifndef ADF_BLK_H
#define ADF_BLK_H 1
#define ULONG uint32_t
#define USHORT uint16_t
#define UCHAR uint8_t
#define ULONG uint32_t
#define USHORT uint16_t
#define UCHAR uint8_t
#define LOGICAL_BLOCK_SIZE 512
#define LOGICAL_BLOCK_SIZE 512
/* ----- FILE SYSTEM ----- */
#define FSMASK_FFS 1
#define FSMASK_INTL 2
#define FSMASK_DIRCACHE 4
#define isFFS(c) ((c)&FSMASK_FFS)
#define isOFS(c) (!((c)&FSMASK_FFS))
#define isINTL(c) ((c)&FSMASK_INTL)
#define isDIRCACHE(c) ((c)&FSMASK_DIRCACHE)
#define FSMASK_FFS 1
#define FSMASK_INTL 2
#define FSMASK_DIRCACHE 4
#define isFFS(c) ((c)&FSMASK_FFS)
#define isOFS(c) (!((c)&FSMASK_FFS))
#define isINTL(c) ((c)&FSMASK_INTL)
#define isDIRCACHE(c) ((c)&FSMASK_DIRCACHE)
/* ----- ENTRIES ----- */
/* access constants */
#define ACCMASK_D (1<<0)
#define ACCMASK_E (1<<1)
#define ACCMASK_W (1<<2)
#define ACCMASK_R (1<<3)
#define ACCMASK_A (1<<4)
#define ACCMASK_P (1<<5)
#define ACCMASK_S (1<<6)
#define ACCMASK_H (1<<7)
#define hasD(c) ((c)&ACCMASK_D)
#define hasE(c) ((c)&ACCMASK_E)
#define hasW(c) ((c)&ACCMASK_W)
#define hasR(c) ((c)&ACCMASK_R)
#define hasA(c) ((c)&ACCMASK_A)
#define hasP(c) ((c)&ACCMASK_P)
#define hasS(c) ((c)&ACCMASK_S)
#define hasH(c) ((c)&ACCMASK_H)
#define ACCMASK_D (1 << 0)
#define ACCMASK_E (1 << 1)
#define ACCMASK_W (1 << 2)
#define ACCMASK_R (1 << 3)
#define ACCMASK_A (1 << 4)
#define ACCMASK_P (1 << 5)
#define ACCMASK_S (1 << 6)
#define ACCMASK_H (1 << 7)
#define hasD(c) ((c)&ACCMASK_D)
#define hasE(c) ((c)&ACCMASK_E)
#define hasW(c) ((c)&ACCMASK_W)
#define hasR(c) ((c)&ACCMASK_R)
#define hasA(c) ((c)&ACCMASK_A)
#define hasP(c) ((c)&ACCMASK_P)
#define hasS(c) ((c)&ACCMASK_S)
#define hasH(c) ((c)&ACCMASK_H)
/* ----- BLOCKS ----- */
/* block constants */
#define BM_VALID -1
#define BM_INVALID 0
#define BM_VALID -1
#define BM_INVALID 0
#define HT_SIZE 72
#define BM_SIZE 25
#define MAX_DATABLK 72
#define MAXNAMELEN 30
#define MAXCMMTLEN 79
#define HT_SIZE 72
#define BM_SIZE 25
#define MAX_DATABLK 72
#define MAXNAMELEN 30
#define MAXCMMTLEN 79
/* block primary and secondary types */
#define T_HEADER 2
#define ST_ROOT 1
#define ST_DIR 2
#define ST_FILE -3
#define ST_LFILE -4
#define ST_LDIR 4
#define ST_LSOFT 3
#define T_LIST 16
#define T_DATA 8
#define T_DIRC 33
#define T_HEADER 2
#define ST_ROOT 1
#define ST_DIR 2
#define ST_FILE -3
#define ST_LFILE -4
#define ST_LDIR 4
#define ST_LSOFT 3
#define T_LIST 16
#define T_DATA 8
#define T_DIRC 33
/*--- blocks structures --- */
struct bBootBlock {
/*000*/ char dosType[4];
/*004*/ ULONG checkSum;
/*008*/ int32_t rootBlock;
/*00c*/ UCHAR data[500+512];
struct bBootBlock
{
/*000*/ char dosType[4];
/*004*/ ULONG checkSum;
/*008*/ int32_t rootBlock;
/*00c*/ UCHAR data[500 + 512];
};
struct bRootBlock {
/*000*/ int32_t type;
int32_t headerKey;
int32_t highSeq;
/*00c*/ int32_t hashTableSize;
int32_t firstData;
/*014*/ ULONG checkSum;
/*018*/ int32_t hashTable[HT_SIZE]; /* hash table */
/*138*/ int32_t bmFlag; /* bitmap flag, -1 means VALID */
/*13c*/ int32_t bmPages[BM_SIZE];
/*1a0*/ int32_t bmExt;
/*1a4*/ int32_t cDays; /* creation date FFS and OFS */
/*1a8*/ int32_t cMins;
/*1ac*/ int32_t cTicks;
/*1b0*/ char nameLen;
/*1b1*/ char diskName[MAXNAMELEN+1];
char r2[8];
/*1d8*/ int32_t days; /* last access : days after 1 jan 1978 */
/*1dc*/ int32_t mins; /* hours and minutes in minutes */
/*1e0*/ int32_t ticks; /* 1/50 seconds */
/*1e4*/ int32_t coDays; /* creation date OFS */
/*1e8*/ int32_t coMins;
/*1ec*/ int32_t coTicks;
int32_t nextSameHash; /* == 0 */
int32_t parent; /* == 0 */
/*1f8*/ int32_t extension; /* FFS: first directory cache block */
/*1fc*/ int32_t secType; /* == 1 */
struct bRootBlock
{
/*000*/ int32_t type;
int32_t headerKey;
int32_t highSeq;
/*00c*/ int32_t hashTableSize;
int32_t firstData;
/*014*/ ULONG checkSum;
/*018*/ int32_t hashTable[HT_SIZE]; /* hash table */
/*138*/ int32_t bmFlag; /* bitmap flag, -1 means VALID */
/*13c*/ int32_t bmPages[BM_SIZE];
/*1a0*/ int32_t bmExt;
/*1a4*/ int32_t cDays; /* creation date FFS and OFS */
/*1a8*/ int32_t cMins;
/*1ac*/ int32_t cTicks;
/*1b0*/ char nameLen;
/*1b1*/ char diskName[MAXNAMELEN + 1];
char r2[8];
/*1d8*/ int32_t days; /* last access : days after 1 jan 1978 */
/*1dc*/ int32_t mins; /* hours and minutes in minutes */
/*1e0*/ int32_t ticks; /* 1/50 seconds */
/*1e4*/ int32_t coDays; /* creation date OFS */
/*1e8*/ int32_t coMins;
/*1ec*/ int32_t coTicks;
int32_t nextSameHash; /* == 0 */
int32_t parent; /* == 0 */
/*1f8*/ int32_t extension; /* FFS: first directory cache block */
/*1fc*/ int32_t secType; /* == 1 */
};
struct bFileHeaderBlock {
/*000*/ int32_t type; /* == 2 */
/*004*/ int32_t headerKey; /* current block number */
/*008*/ int32_t highSeq; /* number of data block in this hdr block */
/*00c*/ int32_t dataSize; /* == 0 */
/*010*/ int32_t firstData;
/*014*/ ULONG checkSum;
/*018*/ int32_t dataBlocks[MAX_DATABLK];
/*138*/ int32_t r1;
/*13c*/ int32_t r2;
/*140*/ int32_t access; /* bit0=del, 1=modif, 2=write, 3=read */
/*144*/ uint32_t byteSize;
/*148*/ char commLen;
/*149*/ char comment[MAXCMMTLEN+1];
char r3[91-(MAXCMMTLEN+1)];
/*1a4*/ int32_t days;
/*1a8*/ int32_t mins;
/*1ac*/ int32_t ticks;
/*1b0*/ char nameLen;
/*1b1*/ char fileName[MAXNAMELEN+1];
int32_t r4;
/*1d4*/ int32_t real; /* unused == 0 */
/*1d8*/ int32_t nextLink; /* link chain */
int32_t r5[5];
/*1f0*/ int32_t nextSameHash; /* next entry with sane hash */
/*1f4*/ int32_t parent; /* parent directory */
/*1f8*/ int32_t extension; /* pointer to extension block */
/*1fc*/ int32_t secType; /* == -3 */
struct bFileHeaderBlock
{
/*000*/ int32_t type; /* == 2 */
/*004*/ int32_t headerKey; /* current block number */
/*008*/ int32_t highSeq; /* number of data block in this hdr block */
/*00c*/ int32_t dataSize; /* == 0 */
/*010*/ int32_t firstData;
/*014*/ ULONG checkSum;
/*018*/ int32_t dataBlocks[MAX_DATABLK];
/*138*/ int32_t r1;
/*13c*/ int32_t r2;
/*140*/ int32_t access; /* bit0=del, 1=modif, 2=write, 3=read */
/*144*/ uint32_t byteSize;
/*148*/ char commLen;
/*149*/ char comment[MAXCMMTLEN + 1];
char r3[91 - (MAXCMMTLEN + 1)];
/*1a4*/ int32_t days;
/*1a8*/ int32_t mins;
/*1ac*/ int32_t ticks;
/*1b0*/ char nameLen;
/*1b1*/ char fileName[MAXNAMELEN + 1];
int32_t r4;
/*1d4*/ int32_t real; /* unused == 0 */
/*1d8*/ int32_t nextLink; /* link chain */
int32_t r5[5];
/*1f0*/ int32_t nextSameHash; /* next entry with sane hash */
/*1f4*/ int32_t parent; /* parent directory */
/*1f8*/ int32_t extension; /* pointer to extension block */
/*1fc*/ int32_t secType; /* == -3 */
};
/*--- file header extension block structure ---*/
struct bFileExtBlock {
/*000*/ int32_t type; /* == 0x10 */
/*004*/ int32_t headerKey;
/*008*/ int32_t highSeq;
/*00c*/ int32_t dataSize; /* == 0 */
/*010*/ int32_t firstData; /* == 0 */
/*014*/ ULONG checkSum;
/*018*/ int32_t dataBlocks[MAX_DATABLK];
int32_t r[45];
int32_t info; /* == 0 */
int32_t nextSameHash; /* == 0 */
/*1f4*/ int32_t parent; /* header block */
/*1f8*/ int32_t extension; /* next header extension block */
/*1fc*/ int32_t secType; /* -3 */
struct bFileExtBlock
{
/*000*/ int32_t type; /* == 0x10 */
/*004*/ int32_t headerKey;
/*008*/ int32_t highSeq;
/*00c*/ int32_t dataSize; /* == 0 */
/*010*/ int32_t firstData; /* == 0 */
/*014*/ ULONG checkSum;
/*018*/ int32_t dataBlocks[MAX_DATABLK];
int32_t r[45];
int32_t info; /* == 0 */
int32_t nextSameHash; /* == 0 */
/*1f4*/ int32_t parent; /* header block */
/*1f8*/ int32_t extension; /* next header extension block */
/*1fc*/ int32_t secType; /* -3 */
};
struct bDirBlock {
/*000*/ int32_t type; /* == 2 */
/*004*/ int32_t headerKey;
/*008*/ int32_t highSeq; /* == 0 */
/*00c*/ int32_t hashTableSize; /* == 0 */
int32_t r1; /* == 0 */
/*014*/ ULONG checkSum;
/*018*/ int32_t hashTable[HT_SIZE]; /* hash table */
int32_t r2[2];
/*140*/ int32_t access;
int32_t r4; /* == 0 */
/*148*/ char commLen;
/*149*/ char comment[MAXCMMTLEN+1];
char r5[91-(MAXCMMTLEN+1)];
/*1a4*/ int32_t days; /* last access */
/*1a8*/ int32_t mins;
/*1ac*/ int32_t ticks;
/*1b0*/ char nameLen;
/*1b1*/ char dirName[MAXNAMELEN+1];
int32_t r6;
/*1d4*/ int32_t real; /* ==0 */
/*1d8*/ int32_t nextLink; /* link list */
int32_t r7[5];
/*1f0*/ int32_t nextSameHash;
/*1f4*/ int32_t parent;
/*1f8*/ int32_t extension; /* FFS : first directory cache */
/*1fc*/ int32_t secType; /* == 2 */
struct bDirBlock
{
/*000*/ int32_t type; /* == 2 */
/*004*/ int32_t headerKey;
/*008*/ int32_t highSeq; /* == 0 */
/*00c*/ int32_t hashTableSize; /* == 0 */
int32_t r1; /* == 0 */
/*014*/ ULONG checkSum;
/*018*/ int32_t hashTable[HT_SIZE]; /* hash table */
int32_t r2[2];
/*140*/ int32_t access;
int32_t r4; /* == 0 */
/*148*/ char commLen;
/*149*/ char comment[MAXCMMTLEN + 1];
char r5[91 - (MAXCMMTLEN + 1)];
/*1a4*/ int32_t days; /* last access */
/*1a8*/ int32_t mins;
/*1ac*/ int32_t ticks;
/*1b0*/ char nameLen;
/*1b1*/ char dirName[MAXNAMELEN + 1];
int32_t r6;
/*1d4*/ int32_t real; /* ==0 */
/*1d8*/ int32_t nextLink; /* link list */
int32_t r7[5];
/*1f0*/ int32_t nextSameHash;
/*1f4*/ int32_t parent;
/*1f8*/ int32_t extension; /* FFS : first directory cache */
/*1fc*/ int32_t secType; /* == 2 */
};
struct bOFSDataBlock{
/*000*/ int32_t type; /* == 8 */
/*004*/ int32_t headerKey; /* pointer to file_hdr block */
/*008*/ int32_t seqNum; /* file data block number */
/*00c*/ int32_t dataSize; /* <= 0x1e8 */
/*010*/ int32_t nextData; /* next data block */
/*014*/ ULONG checkSum;
/*018*/ UCHAR data[488];
struct bOFSDataBlock
{
/*000*/ int32_t type; /* == 8 */
/*004*/ int32_t headerKey; /* pointer to file_hdr block */
/*008*/ int32_t seqNum; /* file data block number */
/*00c*/ int32_t dataSize; /* <= 0x1e8 */
/*010*/ int32_t nextData; /* next data block */
/*014*/ ULONG checkSum;
/*018*/ UCHAR data[488];
/*200*/ };
/* --- bitmap --- */
struct bBitmapBlock {
/*000*/ ULONG checkSum;
/*004*/ ULONG map[127];
};
struct bBitmapExtBlock {
/*000*/ int32_t bmPages[127];
/*1fc*/ int32_t nextBlock;
};
struct bLinkBlock {
/*000*/ int32_t type; /* == 2 */
/*004*/ int32_t headerKey; /* self pointer */
int32_t r1[3];
/*014*/ ULONG checkSum;
/*018*/ char realName[64];
int32_t r2[83];
/*1a4*/ int32_t days; /* last access */
/*1a8*/ int32_t mins;
/*1ac*/ int32_t ticks;
/*1b0*/ char nameLen;
/*1b1*/ char name[MAXNAMELEN+1];
int32_t r3;
/*1d4*/ int32_t realEntry;
/*1d8*/ int32_t nextLink;
int32_t r4[5];
/*1f0*/ int32_t nextSameHash;
/*1f4*/ int32_t parent;
int32_t r5;
/*1fc*/ int32_t secType; /* == -4, 4, 3 */
};
struct bBitmapBlock
{
/*000*/ ULONG checkSum;
/*004*/ ULONG map[127];
};
struct bBitmapExtBlock
{
/*000*/ int32_t bmPages[127];
/*1fc*/ int32_t nextBlock;
};
struct bLinkBlock
{
/*000*/ int32_t type; /* == 2 */
/*004*/ int32_t headerKey; /* self pointer */
int32_t r1[3];
/*014*/ ULONG checkSum;
/*018*/ char realName[64];
int32_t r2[83];
/*1a4*/ int32_t days; /* last access */
/*1a8*/ int32_t mins;
/*1ac*/ int32_t ticks;
/*1b0*/ char nameLen;
/*1b1*/ char name[MAXNAMELEN + 1];
int32_t r3;
/*1d4*/ int32_t realEntry;
/*1d8*/ int32_t nextLink;
int32_t r4[5];
/*1f0*/ int32_t nextSameHash;
/*1f4*/ int32_t parent;
int32_t r5;
/*1fc*/ int32_t secType; /* == -4, 4, 3 */
};
/*--- directory cache block structure ---*/
struct bDirCacheBlock {
/*000*/ int32_t type; /* == 33 */
/*004*/ int32_t headerKey;
/*008*/ int32_t parent;
/*00c*/ int32_t recordsNb;
/*010*/ int32_t nextDirC;
/*014*/ ULONG checkSum;
/*018*/ uint8_t records[488];
};
struct bDirCacheBlock
{
/*000*/ int32_t type; /* == 33 */
/*004*/ int32_t headerKey;
/*008*/ int32_t parent;
/*00c*/ int32_t recordsNb;
/*010*/ int32_t nextDirC;
/*014*/ ULONG checkSum;
/*018*/ uint8_t records[488];
};
#endif /* ADF_BLK_H */
/*##########################################################################*/


@@ -27,20 +27,28 @@
*
*/
#include "adf_str.h"
void adfGetCacheEntry(struct bDirCacheBlock *dirc, int *p, struct CacheEntry *cEntry);
int adfPutCacheEntry( struct bDirCacheBlock *dirc, int *p, struct CacheEntry *cEntry);
void adfGetCacheEntry(
struct bDirCacheBlock* dirc, int* p, struct CacheEntry* cEntry);
int adfPutCacheEntry(
struct bDirCacheBlock* dirc, int* p, struct CacheEntry* cEntry);
struct List* adfGetDirEntCache(struct Volume *vol, SECTNUM dir, BOOL recurs);
struct List* adfGetDirEntCache(struct Volume* vol, SECTNUM dir, BOOL recurs);
RETCODE adfCreateEmptyCache(struct Volume *vol, struct bEntryBlock *parent, SECTNUM nSect);
RETCODE adfAddInCache(struct Volume *vol, struct bEntryBlock *parent, struct bEntryBlock *entry);
RETCODE adfUpdateCache(struct Volume *vol, struct bEntryBlock *parent, struct bEntryBlock *entry, BOOL);
RETCODE adfDelFromCache(struct Volume *vol, struct bEntryBlock *parent, SECTNUM);
RETCODE adfCreateEmptyCache(
struct Volume* vol, struct bEntryBlock* parent, SECTNUM nSect);
RETCODE adfAddInCache(
struct Volume* vol, struct bEntryBlock* parent, struct bEntryBlock* entry);
RETCODE adfUpdateCache(struct Volume* vol,
struct bEntryBlock* parent,
struct bEntryBlock* entry,
BOOL);
RETCODE adfDelFromCache(
struct Volume* vol, struct bEntryBlock* parent, SECTNUM);
RETCODE adfReadDirCBlock(struct Volume *vol, SECTNUM nSect, struct bDirCacheBlock *dirc);
RETCODE adfReadDirCBlock(
struct Volume* vol, SECTNUM nSect, struct bDirCacheBlock* dirc);
RETCODE adfWriteDirCBlock(struct Volume*, int32_t, struct bDirCacheBlock* dirc);
#endif /* _ADF_CACHE_H */


@@ -24,7 +24,6 @@
*
*/
#ifndef _ADF_DEFS_H
#define _ADF_DEFS_H 1
@@ -34,38 +33,33 @@
#define SECTNUM int32_t
#define RETCODE int32_t
#define TRUE 1
#define FALSE 0
#define TRUE 1
#define FALSE 0
#include <stdint.h>
#define ULONG uint32_t
#define USHORT uint16_t
#define UCHAR uint8_t
#define BOOL int
#define ULONG uint32_t
#define USHORT uint16_t
#define UCHAR uint8_t
#define BOOL int
/* defines max and min */
#ifndef max
#define max(a,b) (a)>(b) ? (a) : (b)
#define max(a, b) (a) > (b) ? (a) : (b)
#endif
#ifndef min
#define min(a,b) (a)<(b) ? (a) : (b)
#define min(a, b) (a) < (b) ? (a) : (b)
#endif
/* (*byte) to (*short) and (*byte) to (*long) conversion */
#define Short(p) ((p)[0]<<8 | (p)[1])
#define Long(p) (Short(p)<<16 | Short(p+2))
#define Short(p) ((p)[0] << 8 | (p)[1])
#define Long(p) (Short(p) << 16 | Short(p + 2))
/* swap short and swap long macros for little endian machines */
#define swapShort(p) ((p)[0]<<8 | (p)[1])
#define swapLong(p) (swapShort(p)<<16 | swapShort(p+2))
#define swapShort(p) ((p)[0] << 8 | (p)[1])
#define swapLong(p) (swapShort(p) << 16 | swapShort(p + 2))
#endif /* _ADF_DEFS_H */
/*##########################################################################*/

Some files were not shown because too many files have changed in this diff.