Compare commits

...

9 Commits
v0.2 ... master

  1. 76
      .pylintrc
  2. 4
      Dockerfile
  3. 6
      README.md
  4. 6
      asl_rulebook2/bin/fixup_mmp_pdf.py
  5. 6
      asl_rulebook2/bin/prepare_pdf.py
  6. 149
      asl_rulebook2/extract/content.py
  7. 6
      asl_rulebook2/extract/data/chapter-fixups.json
  8. 85
      asl_rulebook2/extract/data/footnote-fixups.json
  9. 7
      asl_rulebook2/extract/data/index-fixups.json
  10. 103
      asl_rulebook2/extract/data/target-fixups.json
  11. 330
      asl_rulebook2/extract/data/vo-note-fixups.json
  12. 5
      asl_rulebook2/extract/index.py
  13. 4
      asl_rulebook2/pdf.py
  14. 30
      asl_rulebook2/tests/test_extract.py
  15. 4
      asl_rulebook2/utils.py
  16. 5
      asl_rulebook2/webapp/asop.py
  17. 2
      asl_rulebook2/webapp/config/constants.py
  18. 2
      asl_rulebook2/webapp/config/search-aliases.json
  19. 6
      asl_rulebook2/webapp/content.py
  20. 7
      asl_rulebook2/webapp/doc.py
  21. 2
      asl_rulebook2/webapp/main.py
  22. 4
      asl_rulebook2/webapp/prepare.py
  23. 4
      asl_rulebook2/webapp/rule_info.py
  24. 3
      asl_rulebook2/webapp/run_server.py
  25. 29
      asl_rulebook2/webapp/search.py
  26. 4
      asl_rulebook2/webapp/startup.py
  27. 3
      asl_rulebook2/webapp/static/css/SearchResult.css
  28. 2
      asl_rulebook2/webapp/static/prepare.js
  29. 2
      asl_rulebook2/webapp/tests/control_tests.py
  30. 2
      asl_rulebook2/webapp/tests/control_tests_servicer.py
  31. 181
      asl_rulebook2/webapp/tests/proto/generated/control_tests_pb2.py
  32. 8
      asl_rulebook2/webapp/tests/test_asop.py
  33. 4
      asl_rulebook2/webapp/tests/test_content_sets.py
  34. 4
      asl_rulebook2/webapp/tests/test_doc.py
  35. 6
      asl_rulebook2/webapp/tests/test_footnotes.py
  36. 3
      asl_rulebook2/webapp/tests/test_prepare.py
  37. 11
      asl_rulebook2/webapp/tests/utils.py
  38. 11
      conftest.py
  39. 29
      doc/features/global.css
  40. 34
      doc/features/index.html
  41. 10
      requirements-dev.txt
  42. 20
      requirements.txt
  43. 15
      setup.py

@ -61,17 +61,7 @@ confidence=
# --enable=similarities". If you want to run only the classes checker, but have
# no Warning level messages displayed, use "--disable=all --enable=classes
# --disable=W".
disable=print-statement,
parameter-unpacking,
unpacking-in-except,
old-raise-syntax,
backtick,
long-suffix,
old-ne-operator,
old-octal-literal,
import-star-module-level,
non-ascii-bytes-literal,
raw-checker-failed,
disable=raw-checker-failed,
bad-inline-option,
locally-disabled,
file-ignored,
@ -79,67 +69,6 @@ disable=print-statement,
useless-suppression,
deprecated-pragma,
use-symbolic-message-instead,
apply-builtin,
basestring-builtin,
buffer-builtin,
cmp-builtin,
coerce-builtin,
execfile-builtin,
file-builtin,
long-builtin,
raw_input-builtin,
reduce-builtin,
standarderror-builtin,
unicode-builtin,
xrange-builtin,
coerce-method,
delslice-method,
getslice-method,
setslice-method,
no-absolute-import,
old-division,
dict-iter-method,
dict-view-method,
next-method-called,
metaclass-assignment,
indexing-exception,
raising-string,
reload-builtin,
oct-method,
hex-method,
nonzero-method,
cmp-method,
input-builtin,
round-builtin,
intern-builtin,
unichr-builtin,
map-builtin-not-iterating,
zip-builtin-not-iterating,
range-builtin-not-iterating,
filter-builtin-not-iterating,
using-cmp-argument,
eq-without-hash,
div-method,
idiv-method,
rdiv-method,
exception-message-attribute,
invalid-str-codec,
sys-max-int,
bad-python3-import,
deprecated-string-function,
deprecated-str-translate-call,
deprecated-itertools-function,
deprecated-types-field,
next-method-defined,
dict-items-not-iterating,
dict-keys-not-iterating,
dict-values-not-iterating,
deprecated-operator-function,
deprecated-urllib-function,
xreadlines-attribute,
deprecated-sys-function,
exception-escape,
comprehension-escape,
# custom changes follow
import-outside-toplevel,
global-statement,
@ -148,7 +77,8 @@ disable=print-statement,
duplicate-code,
no-else-return,
consider-using-enumerate,
too-many-lines
too-many-lines,
consider-using-f-string
# Enable the message, report, category or checker with the given id(s). You can
# either give multiple identifier separated by comma (,) or put this option

@ -1,11 +1,11 @@
# NOTE: Use the run-container.sh script to build and launch this container.
# NOTE: Multi-stage builds require Docker >= 17.05.
FROM centos:8 AS base
FROM rockylinux:8.5 AS base
# update packages and install requirements
RUN dnf -y upgrade-minimal && \
dnf install -y python38 && \
dnf install -y python39 && \
dnf install -y ghostscript && \
dnf clean all

@ -3,7 +3,7 @@
<img align="right" src="doc/features/images/asl-rulebook2.small.png">
This program lets you search through the ASL Rulebook index, and jump directly to the rules you're looking for.
Click [here](https://htmlpreview.github.io/?https://github.com/pacman-ghost/asl-rulebook2/blob/master/doc/features/index.html) for more details.
Click [here](https://code.pacman-ghost.com/preview/asl-rulebook2/doc/features/index.html) for more details.
<br clear="all">
With [some work](doc/extend.md), you can also:
@ -14,13 +14,13 @@ With [some work](doc/extend.md), you can also:
*NOTE: The program requires Firefox; Chrome doesn't support a key feature it needs.*
*NOTE: This project integrates with my other [`asl-articles`](https://github.com/pacman-ghost/asl-articles) project, so that if an article references a rule, it becomes a clickable link that will open a browser showing that rule. The [`vasl-templates`](https://github.com/pacman-ghost/vasl-templates) project can also be configured to allow vehicles and ordnance to open their corresponding Chapter H entries.*
*NOTE: This project integrates with my other [`asl-articles`](https://code.pacman-ghost.com/public/asl-articles) project, so that if an article references a rule, it becomes a clickable link that will open a browser showing that rule. The [`vasl-templates`](https://code.pacman-ghost.com/public/vasl-templates) project can also be configured to allow vehicles and ordnance to open their corresponding Chapter H entries.*
### Installation
After cloning the repo, you can either:
- run it using Docker (recommended, run `./run-container.sh --port 5020`)
- run it from source (requires Python 3.8.7, install the module, run `asl_rulebook2/webapp/run_server.py`)
- run it from source (install the module, run `asl_rulebook2/webapp/run_server.py`)
*NOTE: Run either command with `--help` to get help.*

@ -91,6 +91,12 @@ def fixup_mmp_pdf( fname, output_fname, fix_zoom, optimize_web, rotate, log=None
annot.Dest = make_page_destination( pdf, page_no, "XYZ", top=page_height )
log_msg( None, "" )
# FUDGE! v2.01 of the MMP eASLRB PDF had a bodgy p104 (A62) that is a little wider than
# the rest of the pages (dumping the page dimensions using pikepdf didn't show anything unusual,
# but it was rendering differently in Firefox :-/), which was causing the h-scrollbar to appear.
# We hack the page width here to bring it in line with the others.
pdf.pages[ 104-1 ].MediaBox = [ 0, 0, 565, 792 ]
# save the updated PDF
log_msg( "progress", "Saving the fixed-up PDF..." )
# NOTE: Setting a blank password will encrypt the file, but doesn't require the user

@ -32,10 +32,10 @@ def prepare_pdf( pdf_file,
"""Prepare the MMP eASLRB PDF."""
# load the targets
with open( targets_fname, "r" ) as fp:
with open( targets_fname, "r", encoding="utf-8" ) as fp:
targets = json.load( fp )
if vo_notes_fname:
with open( vo_notes_fname, "r" ) as fp:
with open( vo_notes_fname, "r", encoding="utf-8" ) as fp:
vo_notes_targets = json.load( fp )
else:
vo_notes_targets = None
@ -116,7 +116,7 @@ def prepare_pdf( pdf_file,
def _run_subprocess( args, caption, relinq ):
"""Run an external process."""
proc = subprocess.Popen( args )
proc = subprocess.Popen( args ) #pylint: disable=consider-using-with
try:
pass_no = 0
while True:

@ -22,25 +22,36 @@ from asl_rulebook2.utils import parse_page_numbers, fixup_text, append_text, rem
_DISABLE_SORT_ITEMS = [
"B40", # nb: to detect B31.1 NARROW STREET
"A16",
"A54", "A55", "A56",
"A58","A59","A60", # Chapter A footnotes (nb: page A61 is a mess wrt element order :-/)
"B1",
"B45", "B46", # Chapter B footnotes
"C25", "C26", # Chapter C footnotes
"D27", # Chapter D footnotes
"E28", "E29", "E30", # Chapter E footnotes
"F20", "F21", # Chapter F footnotes
"F2", "F6", "F7", "F8", "F9", "F10", "F11", "F16", "F17",
"F18", "F19", # Chapter F footnotes
"G48", "G49", "G50", # Chapter G footnotes
"H9", # Chapter H footnotes
429,431,432,433,434,435, # Italian vehicle notes
436,437,438,439, # Italian ordnance notes
# Chapter H vehicles/ordnance:
359, 360,
363, 374,
376, 383, 387, 388, 390, 393,
408, 417, 426, 429,
434, 436, 438,
449, 452, 453,
467, 468, 469,
486, 492,
529, 530, 531, 532,
547, 549, 589,
]
_DEFAULT_ARGS = {
"chapter-a": "42-102", "chapter-b": "109-154", "chapter-c": "158-183", "chapter-d": "187-213",
"chapter-e": "216-245", "chapter-f": "247-267", "chapter-g": "270-319", "chapter-h": "322-324,326-330",
"chapter-j": "593",
"chapter-w": "647-664",
"content_vp_left": 0, "content_vp_right": 565, "content_vp_top": 715, "content_vp_bottom": 28, # viewport
"chapter-a": "43-104", "chapter-b": "111-156", "chapter-c": "161-186", "chapter-d": "191-217",
"chapter-e": "221-250", "chapter-f": "253-271", "chapter-g": "275-324", "chapter-h": "327-329,331-335",
"chapter-j": "599",
"chapter-w": "653-670",
"content_vp_left": 0, "content_vp_right": 600, "content_vp_top": 715, "content_vp_bottom": 28, # viewport
"disable-sort-items": ",".join( str(si) for si in _DISABLE_SORT_ITEMS )
}
@ -48,35 +59,35 @@ _DEFAULT_ARGS = {
# - the order of the nationality + V/O types
# - the page numbers themselves (so that they get parsed)
_VO_NOTE_SECTIONS = [
[ "german", "vehicles", "330,332,334-343", True ],
[ "german", "ordnance", "344-348", True ],
[ "russian", "vehicles", "348,350-355", True ],
[ "russian", "ordnance", "356-358", True ],
[ "russian", "vehicles", "362,364-368", False ],
[ "russian", "ordnance", "369", False ],
[ "american", "vehicles", "371,373-383", True ],
[ "american", "ordnance", "385-389", True ],
[ "british", "vehicles", "395,398-417", True ],
[ "british", "ordnance", "419-423", True ],
[ "italian", "vehicles", "429,431-435", True ],
[ "italian", "ordnance", "436-439", True ],
[ "japanese", "vehicles", "443-448", True ],
[ "japanese", "ordnance", "448-452", True ],
[ "chinese", "vehicles", "456-459", True ],
[ "chinese", "ordnance", "459-463", True ],
[ "landing-craft", "vehicles", "467-468", True ],
[ "french", "vehicles", "470,472-480", True ],
[ "french", "ordnance", "482-487", True ],
[ "allied-minor", "vehicles", "492-493,495-500", True ],
[ "allied-minor", "ordnance", "501-504", True ],
[ "axis-minor", "vehicles", "506,508-515", True ],
[ "axis-minor", "ordnance", "516,518-527", True ],
[ "finnish", "vehicles", "536,538-541", True ],
[ "finnish", "ordnance", "543,545-549", True ],
[ "un-forces", "vehicles", "554,556-565", True ],
[ "un-forces", "ordnance", "567-570", True ],
[ "communist-forces", "vehicles", "580", True ],
[ "communist-forces", "ordnance", "581-585", True ],
[ "german", "vehicles", "335,337,339-348", True ],
[ "german", "ordnance", "349-353", True ],
[ "russian", "vehicles", "353,355-360", True ],
[ "russian", "ordnance", "361-363", True ],
[ "russian", "vehicles", "367,369-373", False ],
[ "russian", "ordnance", "374", False ],
[ "american", "vehicles", "376,378-388", True ],
[ "american", "ordnance", "390-394", True ],
[ "british", "vehicles", "400,403-422", True ],
[ "british", "ordnance", "424-428", True ],
[ "italian", "vehicles", "434,436-440", True ],
[ "italian", "ordnance", "441-444", True ],
[ "japanese", "vehicles", "449-454", True ],
[ "japanese", "ordnance", "454-458", True ],
[ "chinese", "vehicles", "462-465", True ],
[ "chinese", "ordnance", "465-469", True ],
[ "landing-craft", "vehicles", "473-474", True ],
[ "french", "vehicles", "476,478-486", True ],
[ "french", "ordnance", "488-493", True ],
[ "allied-minor", "vehicles", "498-499,501-506", True ],
[ "allied-minor", "ordnance", "507-510", True ],
[ "axis-minor", "vehicles", "512,514-521", True ],
[ "axis-minor", "ordnance", "522,524-533", True ],
[ "finnish", "vehicles", "542,544-547", True ],
[ "finnish", "ordnance", "549,551-555", True ],
[ "un-forces", "vehicles", "560,562-571", True ],
[ "un-forces", "ordnance", "573-576", True ],
[ "communist-forces", "vehicles", "586", True ],
[ "communist-forces", "ordnance", "587-591", True ],
]
# ---------------------------------------------------------------------
@ -103,7 +114,7 @@ class ExtractContent( ExtractBase ):
self._footnote_fixups = load_fixup( "footnote-fixups.json" )
self._vo_note_fixups = load_fixup( "vo-note-fixups.json" )
def extract_content( self, pdf ):
def extract_content( self, pdf ): #pylint: disable=too-many-branches
"""Extract content from the MMP eASLRB."""
# figure out which pages to process
@ -166,9 +177,13 @@ class ExtractContent( ExtractBase ):
# process each element on the page
curr_caption = None
self._top_left_elem = self._prev_elem = None
elem_filter = lambda e: isinstance( e, LTChar )
def elem_filter( elem ):
return isinstance( elem, LTChar )
sort_elems = self._curr_pageid not in disable_sort_items and str(page_no) not in disable_sort_items
for _, elem in PageElemIterator( lt_page, elem_filter=elem_filter, sort_elems=sort_elems ):
centre_adjust = 35 if self._curr_footnote is not None and self._curr_chapter != "W" else 0
for _, elem in PageElemIterator( lt_page, centre_adjust=centre_adjust,
elem_filter=elem_filter, sort_elems=sort_elems
):
# skip problematic elements
if elem.fontname == "OYULKV+MyriadPro-Regular":
@ -237,6 +252,17 @@ class ExtractContent( ExtractBase ):
# loop back to process the next element
self._prev_elem = elem
# check for extra targets that need to be added in
extra_targets = self._target_fixups.get( self._curr_pageid, {} ).pop( "extras", [] )
if extra_targets:
if not self._target_fixups[ self._curr_pageid ]:
del self._target_fixups[ self._curr_pageid ]
for extra_target in extra_targets:
self.targets[ extra_target["ruleid"] ] = {
"caption": extra_target.get("caption",""),
"page_no": page_no, "pos": extra_target["pos"]
}
# add the last caption/footnote (if they haven't already been done)
self._save_footnote()
if curr_caption:
@ -276,7 +302,7 @@ class ExtractContent( ExtractBase ):
fixup = self._target_fixups.get( self._curr_pageid, {} ).get( caption_text )
if fixup:
# yup - make it so
fixup[ "instances" ] = fixup.get("instances",1) - 1
fixup[ "instances" ] = fixup.get( "instances", 1 ) - 1
if fixup["instances"] <= 0:
self._target_fixups[ self._curr_pageid ].pop( caption_text )
if not self._target_fixups[ self._curr_pageid ]:
@ -324,7 +350,14 @@ class ExtractContent( ExtractBase ):
"""Process an element while we're parsing footnotes."""
# check if we've found the start of a new footnote
if self._is_bold( elem ):
if elem.get_text().isdigit() and self._is_start_of_line( elem, lt_page ):
# FUDGE! The new Chapter F has things like "13. 7.1 SAND", which fooled the code into thinking
# that the "7" was the start of a new footnote (because it's a digit, and near the start
# of the line :-/), so we check for that case here.
def check():
if self._curr_chapter != "F" or not self._curr_footnote:
return False
return any( self._curr_footnote[0] == "{}. ".format(i) for i in range(10,15+1) )
if elem.get_text().isdigit() and self._is_start_of_line( elem, lt_page ) and not check():
# yup - save the current footnote, start collecting the new one
self._save_footnote()
elem_pos = ( elem.x0, elem.y1 )
@ -459,7 +492,7 @@ class ExtractContent( ExtractBase ):
# check for the credits at the end of the Chapter F footnotes
if self._curr_chapter == "F":
pos = content.find( "WEST OF ALAMEIN CREDITS" )
pos = content.find( "HOLLOW LEGIONS CREDITS" )
if pos > 0:
content = content[:pos]
# check for the start of the vehicle notes at the end of the Chapter H footnotes
@ -476,6 +509,21 @@ class ExtractContent( ExtractBase ):
"raw_content": orig_content
} )
self._curr_footnote = None
split = self._footnote_fixups.get( self._curr_chapter, {} ).pop( "split:"+footnote_id, None )
if split:
content = self._footnotes[self._curr_chapter][-1]["content"]
pos = content.find( split["split_text"] )
if pos < 0:
self.log_msg( "warning", "Can't find footnote split: {}", split["split_text"] )
else:
new_content = content[ pos + len( split["split_text"] ) : ]
self._footnotes[self._curr_chapter][-1]["content"] = content[:pos].strip()
self._footnotes[ self._curr_chapter ].append( {
"footnote_id": split["new_footnote_id"],
"captions": split["new_captions"],
"content": new_content.strip(),
"raw_content": None
} )
def _save_vo_note( self, caption, page_no, page_pos ):
"""Save an extracted vehicle/ordnance note."""
@ -493,7 +541,9 @@ class ExtractContent( ExtractBase ):
if not self._vo_note_fixups["skips"]:
del self._vo_note_fixups["skips"]
return
if caption.isdigit() and page_no not in (354, 417):
if caption.isdigit():
return
if not any( ch.isalpha() for ch in caption ):
return
def apply_fixups( vo_note_id, caption ):
@ -509,6 +559,9 @@ class ExtractContent( ExtractBase ):
vo_note_id = fixup["new_vo_note_id"]
if "new_caption" in fixup:
caption = fixup["new_caption"]
if "new_page_pos" in fixup:
nonlocal page_pos
page_pos = fixup["new_page_pos"]
return vo_note_id, caption
def cleanup_fixups( nat, vo_type ):
@ -566,7 +619,11 @@ class ExtractContent( ExtractBase ):
elif base_note_id == prev_base_note_id and "." in vo_note_id:
pass # nb: this is to allow things like "9.1" following "9"
else:
return # nb: we got some junk that can be ignored
# NOTE: We used to return here, which ignored a lot of junk, but it has the unfortunate
# side-effect of, if we missed an entry, then all entries after that would also be missed.
# With v2.01 of the eASLRB, there were changes to the layout that weren't really fixable
# using the fixup data files, so we bit the bullet and manually ignore the junk :-/
pass
# save the V/O note
self._vo_notes[ nat ][ vo_type ].append( {
@ -783,7 +840,7 @@ class ExtractContent( ExtractBase ):
jsonval(caption[1]), jsonval(caption[0])
)
chapters = []
for chapter in self._footnotes:
for chapter in self._footnotes: #pylint: disable=consider-using-dict-items
footnotes = []
for footnote in self._footnotes[chapter]:
footnotes.append( "{}: {{\n \"captions\": {},\n \"content\": {}\n}}".format(

@ -25,7 +25,11 @@
"replace": {
"B32": [ "RAILROADS10", "Railroads" ],
"B34": [ "TOWERS18", "Towers" ],
"B36": [ "PREPARED FIRE ZONE2", "Prepared Fire Zone" ]
"B36": [ "PREPARED FIRE ZONE2", "Prepared Fire Zone" ],
"F1": [ "OPEN GROUND", "Open Ground" ],
"F2": [ "SCRUB", "Scrub" ],
"F4": [ "DEIRS", "Deirs" ],
"F5": [ "WADIS", "Wadis" ]
}
}

@ -2,20 +2,14 @@
"A": {
"10A": [
[ "OneHalfFP", "One-Half FP" ],
[ "firstappearedintheASLAnnual'89.(In1998,bothwerereprintedin Classic ASL.)", "first appeared in the ASL Annual '89. (In 1998, both were reprinted in Classic ASL.)" ],
[ "One of the several criticisms", "<p> One of the several criticisms" ]
],
"12": [ [ "TEMto", "TEM to" ] ],
"14": [
[ "bipodmounted", "bipod-mounted" ],
[ "volume o f fire", "volume of fire" ]
],
"17": [ [ "adistinct", "a distinct" ] ],
"19" : [ [ "wellsited", "well-sited" ] ],
"32": [ [ "HWunits", "HW units" ] ],
"33": [ [ "multiLocation", "multi-Location" ] ],
"10A": [ [ "One of the several criticisms", "<p> One of the several criticisms" ] ],
"14": [ [ "bipodmounted", "bipod-mounted" ] ],
"19": [ [ "wellsited", "well-sited" ] ],
"split:31A": {
"split_text": "31B. 25.212 RUSSIAN EARLY WAR DOCTRINE:",
"new_footnote_id": "31B",
"new_captions": [ [ "A25.212", "RUSSIAN EARLY WAR DOCTRINE" ] ]
},
"35": [ [ "The original printing", "<p> The original printing" ] ],
"37": [
[ "- Winter War (vs Soviet Union) 30 November 1939 - 13 March 1940- Continuation War (vs Soviet Union) 25 June 1941 - 4 September 1944- Lapland War (vs Germany) 15 September 1944 - 27 April 1945", " <ul> <li> <b>Winter War</b> (vs Soviet Union) 30 November 1939 - 13 March 1940 <li> <b>Continuation War</b> (vs Soviet Union) 25 June 1941 - 4 September 1944 <li> <b>Lapland War</b> (vs Germany) 15 September 1944 - 27 April 1945 </ul>" ]
@ -26,16 +20,12 @@
[ "Slovakia: Urged on", "<p> <b>Slovakia</b>: Urged on" ],
[ "German-Croatian units in Russia:", " <p> <b>German-Croatian units in Russia</b>:" ],
[ "Italian-Croatian units in Russia:", " <p> <b>Italian-Croatian units in Russia</b>:" ],
[ "existing GermanCroatian units", "existing German-Croatian units" ],
[ "Croatian units in Yugoslavia:", " <p> <b>Croatian units in Yugoslavia</b>:" ],
[ "CroatianArmyunitswereengagedprimarilyinanti-partisanactivities,fightingmostly", "Croatian Army units were engaged primarily in anti-partisan activities, fighting mostly" ],
[ "Bulgaria: Bulgaria", "<p> <b>Bulgaria</b>: Bulgaria" ],
[ "WhiletheriflecompanydidnothaveaninherentHeavyWeapons(HW)platoon,it", "While the rifle company did not have an inherent Heavy Weapons (HW) platoon, it"]
[ "Bulgaria: Bulgaria", "<p> <b>Bulgaria</b>: Bulgaria" ]
],
"39": [ [ "generallyapply", "generally apply" ] ],
"41": [ [ "ViceAdmiral", "Vice-Admiral" ] ],
"43": [
[ "ALLIEDMINORS", "ALLIED MINORS" ],
[ "BARrather", "BAR rather" ]
"42": [
[ "twoman", "two-man" ]
]
},
@ -107,51 +97,14 @@
"F": {
"split:7": {
"split_text": "8. 5.1 WADIS:",
"new_footnote_id": "8",
"new_captions": [ [ "F5.1", "WADIS" ] ]
},
"12": [ [ "non- entrenched", "non-entrenched" ] ],
"19": [
[ "Inthewinternight,thenear-freezingtemperaturecauseddewtoform.", "In the winter night, the near-freezing temperature caused dew to form. " ],
[ "Thenextmorningathickmistoftenformedasthesun evaporateditagain.", "The next morning a thick mist often formed as the sun evaporated it again. " ],
[ "Thiscouldhappeneveninthesummertimeundertheproperenvironmentalconditions,", "This could happen even in the summertime under the proper environmental conditions, " ],
[ "butsincethiswasamuchlessfrequentoccurrenceithasbeen ignored.", "but since this was a much less frequent occurrence it has been ignored." ]
],
"21": [
[ "Playerswillprobablyfinditmoreconvenienttoinstead", "Players will probably find it more convenient to instead" ],
[ "addathird,different-coloreddietothisTH/IFTDR,", "add a third, different-colored die to this TH/IFT DR, " ],
[ "usingittodeterminetheDust DRM.", "using it to determine the Dust DRM." ],
[ "Thefamiliarterm\"subsequentdr\"wasusedintherulebecauseitobviates theneed", "The familiar term \"subsequent dr\" was used in the rule because it obviates the need" ],
[ "a\"new\"concept", "a \"new\" concept" ],
[ "thatof rolling athird diesimultaneously", "that of rolling a third die simultaneously" ]
],
"22": [
[ "theDustcounter\"follows\"thevehicleasit movesfromhex to hex", "the Dust counter \"follows\" the vehicle as it moves from hex to hex" ],
[ "itexpends", "it expends " ],
[ "two MPeach timeitdoesso", " two MP each time it does so" ]
],
"23": [
[ "Anotherwind-relatedaspectoftheNorthAfricanenvironmentisthedesertsandstorm,", "Another wind-related aspect of the North African environment is the desert sandstorm, " ],
[ "orkhamsininArabic.", "or khamsin in Arabic. " ],
[ "ChapterFincludesnospecial rulesforitbecause,", "Chapter F includes no special rules for it because, " ],
[ "withvisibilitycutbythestormtoaslittleasthreeyards,", "with visibility cut by the storm to as little as three yards, " ],
[ "allactivitiesgenerallywerereducedtoseekingcoverfromthesandblastingwindandchoking dust.", "all activities generally were reduced to seeking cover from the sandblasting wind and choking dust. " ],
[ "However,thegamedoesnotignorethepossibilityofakhamsin'soccurrence.", "However, the game does not ignore the possibility of a khamsin's occurrence. " ],
[ "The propercombinationofWeather,EC,WindandGustsinaDYOscenariocancreateits effects,", "The proper combination of Weather, EC, Wind and Gusts in a DYO scenario can create its effects, " ],
[ "andtheprobabilityofitsoccurrenceisgreatestinascenariosetinspringor summer", "and the probability of its occurrence is greatest in a scenario set in spring or summer" ],
[ "thetimewhen khamsinsoccurred mostfrequently.", "the time when khamsins occurred most frequently." ]
],
"24": [
[ "Thisoverlay isused in aHOLLOW LEGIONS scenario.", "This overlay is used in a HOLLOW LEGIONS scenario." ]
],
"25": [
[ "ThefamousNorthAfricanescarpmentsaresimilarto cliffs,", "The famous North African escarpments are similar to cliffs, " ],
[ "butwithlesssteep(andveryeroded)slopes.", "but with less steep (and very eroded) slopes. " ],
[ "Somearesixhundredfeethigh", "Some are six hundred feet high" ],
[ "thoughgenerallytheirheightsrangefromonehundredtotwohundredfeet.", "though generally their heights range from one hundred to two hundred feet. " ],
[ "Theirsignificanceinthedesertwarlaymainlyinthattheywerecommandingheights,", "Their significance in the desert war lay mainly in that they were commanding heights, " ] ,
[ "defensivepositionsforinfantry,", "defensive positions for infantry, " ],
[ "andgreatlyrestrictedvehicularmovementacrossthem", "and greatly restricted vehicular movement across them" ],
[ "Hencetheywereoftenthesceneofheavyfighting,", "Hence they were often the scene of heavy fighting, " ],
[ "especiallywherecrossedbya road", "especially where crossed by a road" ]
]
"19": [ [ "nearfreezing", "near-freezing" ] ]
},
"G": {

@ -66,6 +66,13 @@
"new_content": "(Any fire attack requiring a LOS from the firer which does not use Indirect Fire): C.1, C9.1 [Intervening Units: A6.6] [LC: G12.61-.62, G12.671]"
},
"Dispersed Smoke": {
"replace": [
[ "SmokeGrenadesNA", "Smoke Grenades NA" ],
[ "VehicularSmoke", "Vehicular Smoke" ]
]
},
"Dogfight": {
"old_content": "(AerialCombat):E7.22",
"new_content": "(Aerial Combat): E7.22"

@ -108,18 +108,10 @@
}
},
"A54": {
"25.53 FREEFRENCH:": {
"new_ruleid": "A25.53",
"new_caption": "FREE FRENCH"
}
},
"A55": {
"26.VICTORYCONDITIONS": {
"new_ruleid": "A26",
"new_caption": "VICTORY CONDITIONS"
}
"extras": [
{ "ruleid": "A25.71", "caption": "LEADERS", "pos": [172,714] }
]
},
"B1": {
@ -132,7 +124,10 @@
"T10": { "new_ruleid": null },
"W5": { "new_ruleid": null },
"V5": { "new_ruleid": null },
"X6": { "new_ruleid": null }
"X6": { "new_ruleid": null },
"Y6": { "new_ruleid": null },
"Y7": { "new_ruleid": null },
"Y10": { "new_ruleid": null }
},
"B4": {
@ -348,10 +343,86 @@
"3)": { "new_ruleid": null }
},
"F18": {
"D3": { "new_ruleid": null },
"W1": { "new_ruleid": null },
"H4": { "new_ruleid": null }
"F1": {
"8½ 1 3 3 5 1 7": { "new_ruleid": null },
"1 1": { "new_ruleid": null },
"17": { "new_ruleid": null }
},
"F2": {
"1. OPEN GROUND 1.1": {
"new_ruleid": "F1",
"new_caption": "OPEN GROUND"
},
"2. SCRUB 2.1": {
"new_ruleid": "F2",
"new_caption": "SCRUB"
},
"extras": [
{ "ruleid": "F1.1", "pos": [18,330] },
{ "ruleid": "F2.1", "pos": [18,173] },
{ "ruleid": "F3", "caption": "HAMMADA", "pos": [294,592] },
{ "ruleid": "F3.1", "pos": [294,582] }
]
},
"F3": {
"4. DEIRS 7 4.1": {
"new_ruleid": "F4",
"new_caption": "DEIRS"
},
"5. WADIS 8 5.1": {
"new_ruleid": "F5",
"new_caption": "WADIS"
},
"extras": [
{ "ruleid": "F4.1", "pos": [66,109] },
{ "ruleid": "F5.1", "pos": [342,166] }
]
},
"F6": {
"3 TEM:": {
"new_ruleid": "F5.423",
"new_caption": "TEM"
}
},
"F7": {
"extras": [
{ "ruleid": "F6", "caption": "HILLOCKS", "pos": [66,276] }
]
},
"F8": {
"extras": [
{ "ruleid": "F7", "caption": "SAND", "pos": [294,531] },
{ "ruleid": "F7.1", "pos": [294,520] }
]
},
"F12": {
"11.4": {
"new_ruleid": "F11.4",
"new_caption": "ENVIRONMENTAL CONDITIONS"
},
"18": { "new_ruleid": null },
"extras": [
{ "ruleid": "F11.3", "caption": "TIME OF DAY", "pos": [294,529] }
]
},
"F14": {
"20 11.7 DUST:": {
"new_ruleid": "F11.7",
"new_caption": "DUST"
},
"21": { "new_ruleid": null },
"22": { "new_ruleid": null }
},
"F15": {
"23": { "new_ruleid": null }
},
"G30": {

@ -1,23 +1,27 @@
{
"skips": {
"382": [ "1, 3" ],
"429": [ "^1,660,", "^1and Fiat 3000", "^9/43 armistice", "^4/41 (.9)" ],
"431": [ "^1, for East Africa", "^9/42 (1.4)," ],
"432": [ "^1 (l.2),", "^1. Sources vary" ],
"434": [ "1-", "^1.5 for 11/41-6/42," ],
"438": [ "^1/41-5/43" ],
"439": [ "1 (1", "^1/43 ( 1.2),", "^1/43 (1.3),", "^1/42-5/43." ],
"492": [ "1B11CE/FPNA", "1B11CE/FPNA" ],
"493": [ "1T", "1B" ],
"496": [ "1B" ],
"501": [ "1h-d" ],
"502": [ "1s5", "1s5" ],
"503": [ "1AP5", "1s6" ],
"504": [ "^1.3)" ],
"514": [ "1.4 for 45" ],
"556": [ "1#" ],
"560": [ "1, 3" ]
"342": [ "5.1 GSW 39H(f) PaK:" ],
"345": [ "4 FP", "4 FP", "6 FP", "4 FP", "4 FP", "4 FP", "6 FP", "4 FP" ],
"355": [ "26S M37/39:" ],
"422": [ "30-cwt Lorry:", "3-Ton Lorry:" ],
"463": [ "243M2" ],
"466": [ "^50mm RM obr. 38," ],
"468": [ "20/65, & 2cm FlaK 30:" ],
"485": [ "4, M4A1, & M4A2 Medium Tanks:" ],
"498": [ "11:Stall", "11:Stall", "1B11CE/FPNA", "1B11CE/FPNA", "2x" ],
"499": [ "1T", "1B", "2B11B11" ],
"501": [ "4PP", "5PPcs 5", "5PP" ],
"502": [ "1B" ],
"504": [ "2/2CS 6" ],
"505": [ "9PPcs 2", "9PP*" ],
"506": [ "29PP", "21PP", "9PP", "9PP" ],
"507": [ "4M", "1h-d", "11M" ],
"508": [ "8M", "12M", "8M", "1s5", "11M", "12M" ],
"509": [ "3/42 RR1", "10M", "8M", "9M(9/39-s6", "5/40 RF 1.5", "4/41 RF 1.4", "8M", "6M", "^10/39 RF" ],
"510": [ "^1.3)", "^5/40 RF", "8M" ],
"519": [ "8/43; a", "^38(t)E is 1.3" ],
"520": [ "1.4 for 45", "44, and 1.4 for 45" ]
},
"german": {
@ -63,16 +67,22 @@
"new_caption": "2cm & 3.7cm FlaK LKW"
},
"96": {
"old_caption": "Opel 6700 &Buessing-NAG",
"old_caption": "Opel 6700 &",
"new_caption": "Opel 6700 & Buessing-NAG 4500"
},
"add": [
{ "_comment_": "This gets parsed as '4' and '5.1 GSW 39H(f) PaK' :-/",
"vo_note_id": "45.1", "caption": "GSW 39H(f) PaK", "page_no": 337, "page_pos": [380,561]
"vo_note_id": "45.1", "caption": "GSW 39H(f) PaK", "page_no": 342, "page_pos": [380,561]
},
{ "vo_note_id": "37.1", "caption": "Sturmtiger", "page_no": 532, "page_pos": [118,640] },
{ "vo_note_id": "88.1", "caption": "SdKfz 10/5", "page_no": 532, "page_pos": [399,713] }
{ "vo_note_id": "37.1", "caption": "Sturmtiger", "page_no": 538, "page_pos": [118,640] },
{ "vo_note_id": "88.1", "caption": "SdKfz 10/5", "page_no": 538, "page_pos": [399,713] }
]
},
"ordnance": {
"29": {
"old_caption": "3.7cm FlaK 43: O",
"new_caption": "3.7cm FlaK 43"
}
}
},
@ -99,7 +109,9 @@
"new_caption": "ISU-122 & ISU-152"
},
"add": [
{ "vo_note_id": "12.1", "caption": "T-28E M40(L)", "page_no": 364, "page_pos": [394,289] }
{ "vo_note_id": "47", "caption": "", "page_no": 359, "page_pos": [461,580] },
{ "vo_note_id": "12.1", "caption": "T-28E M40(L)", "page_no": 369, "page_pos": [394,289] },
{ "vo_note_id": "48", "caption": "Stuart III(a)", "page_no": 371, "page_pos": [394,515] }
]
}
},
@ -111,7 +123,7 @@
"new_caption": "M4A3E2 & M4A3E2 (L) Medium Tanks"
},
"17": {
"old_caption": "M4(105) & M4A3(105) MediumTanks",
"old_caption": "M4(105) & M4A3(105) Medium",
"new_caption": "M4(105) & M4A3(105) Medium Tanks"
}
}
@ -120,7 +132,7 @@
"british": {
"vehicles": {
"2": {
"old_caption": "(A17) Tetrarch & Tetrarch CS[Light Tanks Mk VII & Mk VII CS]",
"old_caption": "(A17) Tetrarch & Tetrarch CS",
"new_caption": "(A17) Tetrarch & Tetrarch CS [Light Tanks Mk VII & Mk VII CS]"
},
"6": {
@ -132,211 +144,34 @@
"new_caption": "(A12) Matilda II & II CS [Infantry Tank Mk II]"
},
"36": {
"old_caption": "Valentine & Churchill Bridgelay-ers",
"old_caption": "Valentine & Churchill Bridgelay-",
"new_caption": "Valentine & Churchill Bridgelayers"
},
"45": {
"old_caption": "Humber III & Otter Light Re-connaissance Cars",
"new_caption": "Humber III & Otter Light Reconnaissance Cars"
},
"82": {
"old_caption": "",
"new_caption": "30-cwt Lorry"
"54": {
"old_caption": "Staghound I(a) & II(a) Armoured",
"new_caption": "Staghound I(a) & II(a) Armoured Cars"
},
"83": {
"old_caption": "",
"new_caption": "3-Ton Lorry"
"add": [
{ "vo_note_id": "82", "caption": "30-cwt Lorry", "page_no": 422, "page_pos": [116,475] },
{ "vo_note_id": "83", "caption": "3-Ton Lorry", "page_no": 422, "page_pos": [116,287] }
]
},
"ordnance": {
"16": {
"old_caption": "OBL 4.5-in. Gun & 5.5-in. Gun-",
"new_caption": "OBL 4.5-in. Gun & 5.5-in. Gun-Howitzer"
}
}
},
"italian": {
"vehicles": {
"1": {
"old_caption": "LS/21 & LS/3",
"new_caption": "L5/21 & L5/30"
},
"2": {
"old_caption": "^L3/35: Derived from",
"new_caption": "L3/35"
},
"3": {
"old_caption": "^L3 aa: Some L3",
"new_caption": "L3 aa"
},
"4": {
"old_caption": "^L3 cc: During the early months",
"new_caption": "L3 cc"
},
"5": {
"old_caption": "^L3 Lf: Development of",
"new_caption": "L3 Lf"
},
"6": {
"old_caption": "^L6/40: Designed to replace",
"new_caption": "L6/40"
},
"7": {
"old_caption": "^Mll/39: This tank carried",
"new_caption": "M11/39"
},
"8": {
"old_caption": "^Ml3/40: Replacing the",
"new_caption": "M13/40"
},
"9": {
"old_caption": "^M14/41: This tank,",
"new_caption": "M14/41"
},
"10": {
"old_caption": "^M15/42: This, the last version",
"new_caption": "M15/42"
},
"11": {
"old_caption": "^MR/35(f): The Germans provided",
"new_caption": "MR/35(f)"
},
"12": {
"old_caption": "Semovente M40 & M41 da",
"new_caption": "Semovente M40 & M41 da 75/18"
},
"13": {
"old_caption": "^Semovente M42 da 75/1&75/32: The last model",
"new_caption": "Semovente M42 da 75/18 & 75/32"
},
"14": {
"old_caption": "^Semovente M43 da 105/25: Nicknathe",
"new_caption": "Semovente M43 da 105/25"
},
"15": {
"old_caption": "Semovente L40 da 47/32: The SMV",
"new_caption": "Semovente L40 da 47/32"
},
"16": {
"old_caption": "^Semovente M41M da 90/53: This AFV",
"new_caption": "Semovente M41M da 90/53"
},
"18": {
"old_caption": "^Lince: The Lince (Lynx)",
"new_caption": "Lince"
},
"19": {
"old_caption": "^Lancia lZM: In late 1912",
"new_caption": "Lancia 1ZM"
},
"20": {
"old_caption": "^Fiat 611A & 611BThese armoredcars",
"new_caption": "Fiat 611A & 611B"
},
"21": {
"old_caption": "^AB 40 & AB41These two auto",
"new_caption": "AB 40 & AB 41"
},
"22": {
"old_caption": "^Autoprotetto S37: This APC",
"new_caption": "Autoprotetto S37"
},
"23": {
"old_caption": "Autocannoni da",
"new_caption": "Autocannoni da 20/65(b) & 65/17(b)"
},
"24": {
"old_caption": "Autocannoni da",
"new_caption": "Autocannoni da 75/27 CK & 90/53"
},
"25": {
"old_caption": "^TL 37, TM 40 &TP 32",
"new_caption": "TL 37, TM 40 & TP 32"
},
"26": {
"old_caption": "^Autocarretta: As the portee",
"new_caption": "Autocarretta"
},
"27": {
"old_caption": "^Fiat 508 MC: Derived from",
"new_caption": "Fiat 508 MC"
},
"28": {
"old_caption": "^Autocarri L, M & P: The ItalianArmy",
"new_caption": "Autocarri L, M & P"
}
},
"ordnance": {
"1": {
"old_caption": "^Mortaio da 45 \"Brixia\": This weapon,",
"new_caption": "Mortaio da 45 \"Brixia\""
},
"2": {
"old_caption": "^Mortaio da 81/14: First usedi",
"new_caption": "Mortaio da 81/14"
},
"3": {
"old_caption": "^Fucile-cc S: Like several other",
"new_caption": "Fucile-cc S"
},
"4": {
"old_caption": "^Cannone-cc da 37/45: This was",
"new_caption": "Cannone-cc da 37/45"
},
"5": {
"old_caption": "^Cannone da 47/32: This was",
"new_caption": "Cannone da 47/32"
},
"6": {
"old_caption": "^Cannone da 65/17: This was",
"new_caption": "Cannone da 65/17"
},
"7": {
"old_caption": "^Cannone da 70/15: This",
"new_caption": "Cannone da 70/15"
},
"8": {
"old_caption": "^Obice da 75/13: The Skoda",
"new_caption": "Obice da 75/13"
},
"9": {
"old_caption": "^Cannone da 75/27: This was",
"new_caption": "Cannone da 75/27"
},
"10": {
"old_caption": "^Obice da 75/18: This game piece",
"new_caption": "Obice da 75/18"
},
"11": {
"old_caption": "^Cannone da 75/32: The 75/32",
"new_caption": "Cannone da 75/32"
},
"12": {
"old_caption": "^Obice da 100/17: Another old",
"new_caption": "Obice da 100/17"
},
"13": {
"old_caption": "^Cannone da 105/28: This was",
"new_caption": "Cannone da 105/28"
},
"14": {
"old_caption": "^Obice da 149/13: This piece",
"new_caption": "Obice da 149/13"
},
"15": {
"old_caption": "^Cannone da 149/35: Another",
"new_caption": "Cannone da 149/35"
},
"16": {
"old_caption": "^Cannone da 149/40: To replace",
"new_caption": "Cannone da 149/40"
},
"17": {
"old_caption": "^Cannone-mitragliera da 20/65: Thiswas",
"new_caption": "Cannone-mitragliera da 20/65"
},
"18": {
"old_caption": "^Cannone-aa da 75/39: This was",
"new_caption": "Cannone-aa da 75/39"
},
"add": [
{ "vo_note_id": "19", "caption": "Cannone-aa da 75/46", "page_no": 439, "page_pos": [283,42] },
{ "vo_note_id": "20", "caption": "Cannone-aa da 90/53", "page_no": 439, "page_pos": [384,541] }
{ "vo_note_id": "12", "caption": "Semovente M40 & M41 da 75/18", "page_no": 437, "page_pos": [442,715] }
]
}
},
@ -383,17 +218,16 @@
"new_caption": "Year-3 Type 14cm Naval Seacoast Gun"
},
"20": {
"old_caption": "Type 93 Twin-Mount High-Angle Ma-chine Gun",
"old_caption": "Type 93 Twin-Mount High-Angle Ma-",
"new_caption": "Type 93 Twin-Mount High-Angle Machine Gun"
},
"22": {
"old_caption": "Type 96 Single-, Twin-, & Triple-Mount Naval High-Angle Machine Can-",
"new_caption": "Type 96 Single-, Twin-, & Triple-Mount Naval High-Angle Machine Cannons"
},
"24": {
"old_caption": "Year-10 Type 12cm Naval High-AngleGun",
"new_caption": "Year-10 Type 12cm Naval High-Angle Gun"
}
},
"add": [
{ "vo_note_id": "22", "caption": "Type 96 Single-, Twin-, & Triple-Mount Naval High-Angle Machine Cannons", "page_no": 458, "page_pos": [395,713] }
]
}
},
@ -433,31 +267,51 @@
"french": {
"vehicles": {
"20": {
"old_caption": "Autocanon de 75 mle 97 & Autocanonde 75 Conus(b)",
"new_caption": "Autocanon de 75 mle 97 & Autocanon de 75 Conus(b)"
"old_caption": "CA, & Autocanon de 25 CA",
"new_caption": "Autocanon de 75 mle 97 & Autocanon de 75 Conus(b)",
"new_page_pos": [395,715]
},
"21": {
"old_caption": "Camion de Mitrailleuse Contre-Avions, Camion de 13.2 CAJ, Camion de",
"new_caption": "Camion de Mitrailleuse Contre-Avions, Camion de 13.2 CAJ, Camion de 20 CA, & Autocanon de 25 CA"
},
"36": {
"old_caption": "Peugeot 202, Citroën 23, & RenaultAGR2",
"old_caption": "Peugeot 202, Citroën 23, & Renault#NaAGR2",
"new_caption": "Peugeot 202, Citroën 23, & Renault AGR2"
},
"38": {
"old_caption": "Cr",
"new_caption": "Crusader II & III Tanks"
},
"39": {
"old_caption": "M",
"new_caption": "M4, M4A1, & M4A2 Medium Tanks"
},
"40": {
"old_caption": "M4A3(75)W, M4A3(76)W, & M4A3(105) Medium Tanks, & M4Tankdozer",
"new_caption": "M4A3(75)W, M4A3(76)W, & M4A3(105) Medium Tanks, & M4 Tankdozer"
}
},
"add": [
{ "vo_note_id": "11", "caption": "D2 & D2(L)", "page_no": 479, "page_pos": [395,715] },
{ "vo_note_id": "15", "caption": "AM Dodge(a)", "page_no": 480, "page_pos": [444,715] }
]
},
"ordnance": {
"6": {
"old_caption": "Canon Antichar de 47SA mle 37 APX",
"old_caption": "Canon Antichar de 47",
"new_caption": "Canon Antichar de 47 SA mle 37 APX"
},
"18": {
"old_caption": "Mitrailleuse de 13.2 CAJmle 30",
"new_caption": "Mitrailleuse de 13.2 CAJ mle 30"
}
},
"27": {
"old_caption": "M2A1 & M3 105mm Howitzers: C",
"new_caption": "M2A1 & M3 105mm Howitzers"
},
"add": [
{ "vo_note_id": "26", "caption": "OQF 25-Pounder Gun-Howitzer", "page_no": 493, "page_pos": [18,715] }
]
}
},
@ -494,7 +348,7 @@
"new_caption": "M3A3(a) FlaK 38"
},
"29": {
"old_caption": "Marmon-Herrington III(b) Armored",
"old_caption": "Marmon-Herrington III(b) Armored*2 TK DR",
"new_caption": "Marmon-Herrington III(b) Armored Cars"
},
"31": {
@ -502,7 +356,7 @@
"new_caption": "L5/30(i) & L3/35(i) & L6/40(i) & M13/40(i)"
},
"37": {
"old_caption": "Light Truck & Medium Truck &",
"old_caption": "Light Truck & Medium Truck &Heavy Truck",
"new_caption": "Light Truck & Medium Truck & Heavy Truck"
}
},
@ -513,7 +367,7 @@
"new_caption": "75M 19S"
},
"add": [
{ "vo_note_id": "20", "caption": "3.7cm Infantry Gun", "page_no": 502, "page_pos": [393,616] }
{ "vo_note_id": "20", "caption": "3.7cm Infantry Gun", "page_no": 508, "page_pos": [393,616] }
]
}
},
@ -555,10 +409,17 @@
"new_vo_note_id": "16",
"new_caption": "40M Nimrod"
},
"39": {
"old_caption": "PzKpfw IVD(g), PzKpfw IVF1(g),",
"new_caption": "PzKpfw IVD(g), PzKpfw IVF1(g), & PzKpfw IVH(g)"
},
"50": {
"old_caption": "Light Truck, Medium Truck, &Heavy Truck",
"new_caption": "Light Truck, Medium Truck, & Heavy Truck"
}
},
"add": [
{ "vo_note_id": "48", "caption": "RSO(g)", "page_no": 521, "page_pos": [394,713] }
]
},
"ordnance": {
"20": {
@ -689,7 +550,10 @@
"old_caption": "ItK/31(r)",
"new_vo_note_id": "39",
"new_caption": "76 ItK/31(r)"
}
},
"add": [
{ "vo_note_id": "24", "caption": "105 H/37", "page_no": 553, "page_pos": [394,713] }
]
}
},
@ -708,7 +572,7 @@
"new_caption": "M4A3E8(a) Medium Tank & M4A3E8 Dozer(a)"
},
"47": {
"old_caption": "Oxford Carrier, MMG & Oxford Car-rier, HMG",
"old_caption": "Oxford Carrier, MMG & Oxford Car-",
"new_caption": "Oxford Carrier, MMG & Oxford Carrier, HMG"
},
"57": {

@ -15,7 +15,7 @@ from asl_rulebook2.utils import parse_page_numbers, fixup_text, extract_parens_c
# ---------------------------------------------------------------------
_DEFAULT_ARGS = {
"pages": "10-41",
"pages": "10-42",
"index_vp_left": 0, "index_vp_right": 565, "index_vp_top": 715, "index_vp_bottom": 20, # viewport
"first_title": "a", "last_title": "X#", # first/last index entries
}
@ -53,7 +53,8 @@ class ExtractIndex( ExtractBase ):
# process each element on the page
self._prev_y0 = 99999
elem_filter = lambda e: isinstance( e, LTChar )
def elem_filter( elem ):
    """Keep only character elements (pdfminer LTChar) from the page."""
    return isinstance( elem, LTChar )
for _, elem in PageElemIterator( lt_page, elem_filter=elem_filter ):
# check if we should ignore this element

@ -111,7 +111,7 @@ class PageIterator:
class PageElemIterator:
"""Iterate over each element in a page."""
def __init__( self, lt_page, elem_filter=None, sort_elems=False ):
def __init__( self, lt_page, elem_filter=None, sort_elems=False, centre_adjust=0 ):
self.lt_page = lt_page
# collect all the elements (so that they can be sorted)
self._elems = []
@ -127,7 +127,7 @@ class PageElemIterator:
walk( lt_page, 0 )
if sort_elems:
def sort_key( elem ):
col_no = 0 if elem[1].x0 < lt_page.width/2 else 1
col_no = 0 if elem[1].x0 < lt_page.width/2 + centre_adjust else 1
# NOTE: Some elements that should be aligned are actually misaligned by a miniscule amount (e.g. 10^-5),
# so to stop this from resulting in the wrong sort order, we truncate the decimal places.
# NOTE: Characters are often rendered in different fonts, with bounding boxes that don't align neatly.

@ -32,7 +32,8 @@ def test_extract_index():
# check the results
fname = os.path.join( dname, "index.txt" )
assert open( fname, "r", encoding="utf-8" ).read() == buf
with open( fname, "r", encoding="utf-8" ) as fp:
assert fp.read() == buf
# run the test
for_each_easlrb_version( do_test )
@ -61,13 +62,17 @@ def test_extract_content():
# check the results
fname2 = os.path.join( dname, "targets.txt" )
assert open( fname2, "r", encoding="utf-8" ).read() == targets_buf
with open( fname2, "r", encoding="utf-8" ) as fp:
assert fp.read() == targets_buf
fname2 = os.path.join( dname, "chapters.txt" )
assert open( fname2, "r", encoding="utf-8" ).read() == chapters_buf
with open( fname2, "r", encoding="utf-8" ) as fp:
assert fp.read() == chapters_buf
fname2 = os.path.join( dname, "footnotes.txt" )
assert open( fname2, "r", encoding="utf-8" ).read() == footnotes_buf
with open( fname2, "r", encoding="utf-8" ) as fp:
assert fp.read() == footnotes_buf
fname2 = os.path.join( dname, "vo-notes.txt" )
assert open( fname2, "r", encoding="utf-8" ).read() == vo_notes_buf
with open( fname2, "r", encoding="utf-8" ) as fp:
assert fp.read() == vo_notes_buf
# run the test
for_each_easlrb_version( do_test )
@ -99,15 +104,20 @@ def test_extract_all():
# check the results
fname2 = os.path.join( dname, "index.json" )
assert open( fname2, "r", encoding="utf-8" ).read() == index_buf
with open( fname2, "r", encoding="utf-8" ) as fp:
assert fp.read() == index_buf
fname2 = os.path.join( dname, "targets.json" )
assert open( fname2, "r", encoding="utf-8" ).read() == targets_buf
with open( fname2, "r", encoding="utf-8" ) as fp:
assert fp.read() == targets_buf
fname2 = os.path.join( dname, "chapters.json" )
assert open( fname2, "r", encoding="utf-8" ).read() == chapters_buf
with open( fname2, "r", encoding="utf-8" ) as fp:
assert fp.read() == chapters_buf
fname2 = os.path.join( dname, "footnotes.json" )
assert open( fname2, "r", encoding="utf-8" ).read() == footnotes_buf
with open( fname2, "r", encoding="utf-8" ) as fp:
assert fp.read() == footnotes_buf
fname2 = os.path.join( dname, "vo-notes.json" )
assert open( fname2, "r", encoding="utf-8" ).read() == vo_notes_buf
with open( fname2, "r", encoding="utf-8" ) as fp:
assert fp.read() == vo_notes_buf
# run the test
for_each_easlrb_version( do_test )

@ -30,7 +30,7 @@ class TempFile:
else:
encoding = "utf-8" if "b" not in self.mode else None
assert self.temp_file is None
self.temp_file = tempfile.NamedTemporaryFile(
self.temp_file = tempfile.NamedTemporaryFile( #pylint: disable=consider-using-with
mode = self.mode,
encoding = encoding,
suffix = self.extn,
@ -73,7 +73,7 @@ def strip_html( val ):
self.strict = False
def handle_data( self, data ):
buf.write( data )
def error( self, message ):
def error( self, message ): #pylint: disable=missing-function-docstring
pass
# strip HTML

@ -2,7 +2,8 @@
import os
from flask import jsonify, render_template_string, send_from_directory, safe_join, url_for, abort
from flask import jsonify, render_template_string, send_from_directory, url_for, abort
from werkzeug.utils import safe_join
from asl_rulebook2.webapp import app
from asl_rulebook2.webapp.content import tag_ruleids
@ -129,5 +130,5 @@ def _render_template( fname ):
"ASOP_BASE_URL": url_for( "get_asop_file", path="" ),
}
args.update( _asop.get( "template_args", {} ) )
with open( fname, "r" ) as fp:
with open( fname, "r", encoding="utf-8" ) as fp:
return fname, render_template_string( fp.read(), **args )

@ -3,7 +3,7 @@
import os
APP_NAME = "ASL Rulebook 2"
APP_VERSION = "v0.2" # nb: also update setup.py
APP_VERSION = "v0.3" # nb: also update setup.py
APP_DESCRIPTION = "Search engine for the ASL Rulebook."
BASE_DIR = os.path.abspath( os.path.join( os.path.dirname(__file__), ".." ) )

@ -16,7 +16,7 @@
"foxhole", "trench", "ditch"
],
"vehicle/vehicles": [
"tank", "halftrack", "half-track", "jeep", "carrier"
"vehicular", "tank", "halftrack", "half-track", "jeep", "carrier"
],
"illumination": [
"tarshell", "illuminating round", "trip flare"

@ -395,3 +395,9 @@ def get_vo_note_targets():
key = "{}/{}_{}".format( cdoc["cdoc_id"], nat, vo_type )
add_targets( targets[nat][vo_type], key, vo_notes[nat][vo_type] )
return jsonify( targets )
# ---------------------------------------------------------------------
def is_main_ruleid( ruleid ):
    """Check if a ruleid is from the main ASLRB.

    Main-rulebook ruleid's start with a chapter letter (A-H, J, K or W),
    followed by a rule number (a digit, or "." for chapter-level entries).
    """
    if len( ruleid ) < 2:
        return False
    chapter, first_char = ruleid[0], ruleid[1]
    return chapter in "ABCDEFGHJKW" and first_char in "123456789."

@ -5,7 +5,8 @@ import io
import re
import markdown
from flask import make_response, send_file, abort, safe_join
from flask import make_response, send_file, abort
from werkzeug.utils import safe_join
from asl_rulebook2.webapp import app
@ -18,7 +19,7 @@ def get_doc( path ):
# locate the documentation file
dname = os.path.join( os.path.dirname( __file__ ), "../../doc/" )
fname = safe_join( dname, path )
if not os.path.isfile( fname ):
if fname is None or not os.path.isfile( fname ):
# FUDGE! If this package has been installed in non-editable mode (i.e. into site-packages, while it's possible
# to get the root doc/ directory included in the installation (by adding a __init__.py file :-/, then including
# it in MANIFEST.in), it ends up in asl-rulebook2's parent directory (i.e. the main site-packages directory),
@ -27,7 +28,7 @@ def get_doc( path ):
# is installed. This won't work on Windows, but we'll do the necessary penance, and just live with it... :-/
dname = os.path.join( os.path.dirname( __file__ ), "data/doc/" )
fname = safe_join( dname, path )
if not os.path.isfile( fname ):
if fname is None or not os.path.isfile( fname ):
abort( 404 )
# check if the file is Markdown

@ -69,7 +69,7 @@ def get_control_tests():
servicer = ControlTestsServicer( app )
add_ControlTestsServicer_to_server( servicer, server )
port_no = parse_int( get_port(), -1 ) # nb: have to get this again?!
if port_no <= 0:
if port_no <= 0: #pylint: disable=consider-using-max-builtin
# NOTE: Requesting port 0 tells grpc to use any free port, which is usually OK, unless
# we're running inside a Docker container, in which case it needs to be pre-defined,
# so that the port can be mapped to an external port when the container is started.

@ -169,9 +169,9 @@ def _do_prepare_data_files( args, download_url ):
with zipfile.ZipFile( zip_data, "w", zipfile.ZIP_DEFLATED ) as zip_file:
fname_stem = "ASL Rulebook"
zip_file.writestr( fname_stem+".pdf", pdf_data )
for key in file_data:
for key, fdata in file_data.items():
fname = "{}.{}".format( fname_stem, key )
zip_file.writestr( fname, file_data[key] )
zip_file.writestr( fname, fdata )
zip_data = zip_data.getvalue()
# notify the front-end that we're done

@ -184,8 +184,8 @@ def init_errata( startup_msgs, logger ):
logger.info( "- Loaded %s.", plural(len(sources),"source","sources") )
# fixup all the errata entries with their real source
for ruleid in _errata:
for anno in _errata[ruleid]:
for ruleid, annos in _errata.items():
for anno in annos:
if "source" in anno:
anno["source"] = sources.get( anno["source"], anno["source"] )

@ -77,7 +77,8 @@ def main( bind_addr, data_dir, force_init_delay, flask_debug ):
def _start_server():
time.sleep( force_init_delay )
url = "http://{}:{}".format( flask_host, flask_port )
_ = urllib.request.urlopen( url )
with urllib.request.urlopen( url ) as fp:
_ = fp.read()
threading.Thread( target=_start_server, daemon=True ).start()
# run the server

@ -22,7 +22,7 @@ import lxml.html
from asl_rulebook2.utils import plural
from asl_rulebook2.webapp import app
from asl_rulebook2.webapp import startup as webapp_startup
from asl_rulebook2.webapp.content import tag_ruleids
from asl_rulebook2.webapp.content import tag_ruleids, is_main_ruleid
from asl_rulebook2.webapp.utils import make_config_path, make_data_path, split_strip
_searchdb_fname = None
@ -168,7 +168,7 @@ def _do_search( args ):
for result in results:
title = result.get( "title", result.get("caption","???") )
_logger.debug( "- %s: %s (%.3f)",
result["_fts_rowid"],
result.get( "_fts_rowid" ),
title.replace( _BEGIN_HIGHLIGHT, "" ).replace( _END_HIGHLIGHT, "" ),
result["_score"]
)
@ -381,7 +381,18 @@ def _adjust_sort_order( results ):
"""Adjust the sort order of the search results."""
results2 = []
def extract_sr( func, force=False ):
def extract_sr( func, check_main_rb=False, force=False ):
if check_main_rb:
# extract search results from the main rulebook first
def is_main_rb( sr ):
ruleids = sr.get( "ruleids", [] )
return ruleids and is_main_ruleid( ruleids[0] )
do_extract_sr(
lambda sr: func( sr ) and is_main_rb( sr ),
force = force
)
do_extract_sr( func, force=force )
def do_extract_sr( func, force ):
# move results that pass the filter function to the new list
i = 0
while True:
@ -402,19 +413,23 @@ def _adjust_sort_order( results ):
# prefer search results whose title is an exact match
extract_sr(
lambda sr: get(sr,"title").startswith( _BEGIN_HIGHLIGHT ) and get(sr,"title").endswith( _END_HIGHLIGHT )
lambda sr: get(sr,"title").startswith( _BEGIN_HIGHLIGHT ) and get(sr,"title").endswith( _END_HIGHLIGHT ),
check_main_rb = True
)
# prefer search results whose title starts with a match
extract_sr(
lambda sr: get(sr,"title").startswith( _BEGIN_HIGHLIGHT )
lambda sr: get(sr,"title").startswith( _BEGIN_HIGHLIGHT ),
check_main_rb = True
)
# prefer search results that have a match in the title
extract_sr(
lambda sr: _BEGIN_HIGHLIGHT in get(sr,"title")
lambda sr: _BEGIN_HIGHLIGHT in get(sr,"title"),
check_main_rb = True
)
# prefer search results that have a match in the subtitle
extract_sr(
lambda sr: _BEGIN_HIGHLIGHT in get(sr,"subtitle")
lambda sr: _BEGIN_HIGHLIGHT in get(sr,"subtitle"),
check_main_rb = True
)
# prefer user annotations
extract_sr(

@ -80,9 +80,7 @@ def init_webapp():
# NOTE: This is quite a slow process (~1 minute for a full data load), which is why we don't do it inline,
# during the normal startup process. So, we start up using the original content, and if the user does
# a search, that's what they will see, but we fix it up in the background, and the new content will
# eventually start to be returned as search results. We could do this process once, and save the results
# in a file, then reload everything at startup, which will obviously be much faster, but we then have to
# figure out when that file needs to be rebuolt :-/
# eventually start to be returned as search results.
if app.config.get( "BLOCKING_STARTUP_TASKS" ):
# NOTE: It's useful to do this synchronously when running the test suite, since if the tests
# need the linkified ruleid's, they can't start until the fixup has finished (and if they don't

@ -9,7 +9,8 @@
#search-results .index-sr>.caption .subtitle { padding-left: 6px ; font-size: 80% ; font-style: italic ; }
#search-results .index-sr>.caption .collapser { height: 16px ; position: absolute ; top: 4px ; right: 4px ; }
#search-results .index-sr .body { padding: 2px 5px 0 5px ; }
#search-results .index-sr .see-also { font-style: italic ; cursor: pointer ; }
#search-results .index-sr .see-also { font-style: italic ; }
#search-results .index-sr .see-also a { cursor: pointer ; }
#search-results .index-sr img.toggle-rulerefs { float: right ; margin: 0 0 0.25em 0.25em ; height: 1.25em ; cursor: pointer ; }
#search-results .index-sr ul.rulerefs .caption { padding-right: 0.5em ; }
#search-results .index-sr ul.rulerefs .ruleid { font-size: 80% ; }

@ -140,7 +140,7 @@ gPrepareApp.component( "upload-panel", {
Click on the button, and select your copy of MMP's eASLRB.
<div class="info"> You <u>must</u> use the <a href="https://www.wargamevault.com/product/344879/Electronic-Advanced-Squad-Leader-Rulebook" target="_blank">official MMP eASLRB</a>. <br>
A scan of a printed rulebook <u>will not work</u>!
<p> You should use v1.07 of the eASLRB PDF (normal version, not the "inherited zoom" version). Other versions <i>may</i> work, but may have warnings and/or errors. </p>
<p> You should use v2.01 of the eASLRB PDF (normal version, not the "inherited zoom" version). Other versions <i>may</i> work, but may have warnings and/or errors. </p>
</div>
</div>
</div>

@ -1,7 +1,7 @@
""" Allow the test suite to control a remote webapp server. """
import grpc
from google.protobuf.empty_pb2 import Empty
from google.protobuf.empty_pb2 import Empty #pylint: disable=no-name-in-module
from asl_rulebook2.webapp.tests.proto.generated.control_tests_pb2_grpc import ControlTestsStub

@ -4,7 +4,7 @@ import os
import inspect
import logging
from google.protobuf.empty_pb2 import Empty
from google.protobuf.empty_pb2 import Empty #pylint: disable=no-name-in-module
from asl_rulebook2.webapp.tests.proto.generated.control_tests_pb2_grpc \
import ControlTestsServicer as BaseControlTestsServicer

@ -3,6 +3,7 @@
# source: control_tests.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
@ -14,121 +15,12 @@ _sym_db = _symbol_database.Default()
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='control_tests.proto',
package='',
syntax='proto3',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x13\x63ontrol_tests.proto\x1a\x1bgoogle/protobuf/empty.proto\",\n\x11SetDataDirRequest\x12\x17\n\x0f\x66ixturesDirName\x18\x01 \x01(\t\"h\n\x16SetAppConfigValRequest\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x10\n\x06strVal\x18\x02 \x01(\tH\x00\x12\x10\n\x06intVal\x18\x03 \x01(\x05H\x00\x12\x11\n\x07\x62oolVal\x18\x04 \x01(\x08H\x00\x42\n\n\x08\x61\x63_oneof2\x86\x02\n\x0c\x43ontrolTests\x12<\n\nstartTests\x12\x16.google.protobuf.Empty\x1a\x16.google.protobuf.Empty\x12:\n\x08\x65ndTests\x12\x16.google.protobuf.Empty\x1a\x16.google.protobuf.Empty\x12\x38\n\nsetDataDir\x12\x12.SetDataDirRequest\x1a\x16.google.protobuf.Empty\x12\x42\n\x0fsetAppConfigVal\x12\x17.SetAppConfigValRequest\x1a\x16.google.protobuf.Emptyb\x06proto3'
,
dependencies=[google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,])
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x13\x63ontrol_tests.proto\x1a\x1bgoogle/protobuf/empty.proto\",\n\x11SetDataDirRequest\x12\x17\n\x0f\x66ixturesDirName\x18\x01 \x01(\t\"h\n\x16SetAppConfigValRequest\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x10\n\x06strVal\x18\x02 \x01(\tH\x00\x12\x10\n\x06intVal\x18\x03 \x01(\x05H\x00\x12\x11\n\x07\x62oolVal\x18\x04 \x01(\x08H\x00\x42\n\n\x08\x61\x63_oneof2\x86\x02\n\x0c\x43ontrolTests\x12<\n\nstartTests\x12\x16.google.protobuf.Empty\x1a\x16.google.protobuf.Empty\x12:\n\x08\x65ndTests\x12\x16.google.protobuf.Empty\x1a\x16.google.protobuf.Empty\x12\x38\n\nsetDataDir\x12\x12.SetDataDirRequest\x1a\x16.google.protobuf.Empty\x12\x42\n\x0fsetAppConfigVal\x12\x17.SetAppConfigValRequest\x1a\x16.google.protobuf.Emptyb\x06proto3')
_SETDATADIRREQUEST = _descriptor.Descriptor(
name='SetDataDirRequest',
full_name='SetDataDirRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='fixturesDirName', full_name='SetDataDirRequest.fixturesDirName', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=52,
serialized_end=96,
)
_SETAPPCONFIGVALREQUEST = _descriptor.Descriptor(
name='SetAppConfigValRequest',
full_name='SetAppConfigValRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='SetAppConfigValRequest.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='strVal', full_name='SetAppConfigValRequest.strVal', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='intVal', full_name='SetAppConfigValRequest.intVal', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='boolVal', full_name='SetAppConfigValRequest.boolVal', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='ac_oneof', full_name='SetAppConfigValRequest.ac_oneof',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=98,
serialized_end=202,
)
_SETAPPCONFIGVALREQUEST.oneofs_by_name['ac_oneof'].fields.append(
_SETAPPCONFIGVALREQUEST.fields_by_name['strVal'])
_SETAPPCONFIGVALREQUEST.fields_by_name['strVal'].containing_oneof = _SETAPPCONFIGVALREQUEST.oneofs_by_name['ac_oneof']
_SETAPPCONFIGVALREQUEST.oneofs_by_name['ac_oneof'].fields.append(
_SETAPPCONFIGVALREQUEST.fields_by_name['intVal'])
_SETAPPCONFIGVALREQUEST.fields_by_name['intVal'].containing_oneof = _SETAPPCONFIGVALREQUEST.oneofs_by_name['ac_oneof']
_SETAPPCONFIGVALREQUEST.oneofs_by_name['ac_oneof'].fields.append(
_SETAPPCONFIGVALREQUEST.fields_by_name['boolVal'])
_SETAPPCONFIGVALREQUEST.fields_by_name['boolVal'].containing_oneof = _SETAPPCONFIGVALREQUEST.oneofs_by_name['ac_oneof']
DESCRIPTOR.message_types_by_name['SetDataDirRequest'] = _SETDATADIRREQUEST
DESCRIPTOR.message_types_by_name['SetAppConfigValRequest'] = _SETAPPCONFIGVALREQUEST
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_SETDATADIRREQUEST = DESCRIPTOR.message_types_by_name['SetDataDirRequest']
_SETAPPCONFIGVALREQUEST = DESCRIPTOR.message_types_by_name['SetAppConfigValRequest']
SetDataDirRequest = _reflection.GeneratedProtocolMessageType('SetDataDirRequest', (_message.Message,), {
'DESCRIPTOR' : _SETDATADIRREQUEST,
'__module__' : 'control_tests_pb2'
@ -143,61 +35,14 @@ SetAppConfigValRequest = _reflection.GeneratedProtocolMessageType('SetAppConfigV
})
_sym_db.RegisterMessage(SetAppConfigValRequest)
_CONTROLTESTS = DESCRIPTOR.services_by_name['ControlTests']
if _descriptor._USE_C_DESCRIPTORS == False:
_CONTROLTESTS = _descriptor.ServiceDescriptor(
name='ControlTests',
full_name='ControlTests',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=205,
serialized_end=467,
methods=[
_descriptor.MethodDescriptor(
name='startTests',
full_name='ControlTests.startTests',
index=0,
containing_service=None,
input_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='endTests',
full_name='ControlTests.endTests',
index=1,
containing_service=None,
input_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='setDataDir',
full_name='ControlTests.setDataDir',
index=2,
containing_service=None,
input_type=_SETDATADIRREQUEST,
output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='setAppConfigVal',
full_name='ControlTests.setAppConfigVal',
index=3,
containing_service=None,
input_type=_SETAPPCONFIGVALREQUEST,
output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_CONTROLTESTS)
DESCRIPTOR.services_by_name['ControlTests'] = _CONTROLTESTS
DESCRIPTOR._options = None
_SETDATADIRREQUEST._serialized_start=52
_SETDATADIRREQUEST._serialized_end=96
_SETAPPCONFIGVALREQUEST._serialized_start=98
_SETAPPCONFIGVALREQUEST._serialized_end=202
_CONTROLTESTS._serialized_start=205
_CONTROLTESTS._serialized_end=467
# @@protoc_insertion_point(module_scope)

@ -20,7 +20,7 @@ def test_asop_nav( webdriver, webapp ):
# load the ASOP
fname = os.path.join( os.path.dirname(__file__), "fixtures/asop/asop/index.json" )
with open( fname, "r" ) as fp:
with open( fname, "r", encoding="utf-8" ) as fp:
asop_index = json.load( fp )
# check the nav
@ -64,7 +64,7 @@ def test_asop_content( webdriver, webapp ):
fname = os.path.join( base_dir, fname )
if not os.path.isfile( fname ):
return None
with open( fname, "r" ) as fp:
with open( fname, "r", encoding="utf-8" ) as fp:
return json.load( fp ) if as_json else fp.read()
# load the ASOP index
@ -121,6 +121,8 @@ def test_asop_content( webdriver, webapp ):
assert content[0:20] in expected_content
# check each individual section
def check_title():
    """Return True when the ASOP pane's title matches `expected` (closure variable set in the loop below)."""
    # NOTE: wrapped as a named function (instead of a lambda) so wait_for() can poll it
    # without pylint complaining about a cell-var-from-loop lambda.
    return find_child( "#asop .title" ).text == expected
for section_no, nav_section in enumerate( nav[chapter_no]["sections"] ):
# click on the section in the nav pane
@ -130,7 +132,7 @@ def test_asop_content( webdriver, webapp ):
expected = expected_sections[ section_no ][ "caption" ]
if expected_chapter.get( "sniper_phase" ):
expected += "\u2020"
wait_for( 2, lambda: find_child("#asop .title").text == expected )
wait_for( 2, check_title )
# check the preamble
# NOTE: The preamble is part of the parent chapter, and so should remain unchanged.

@ -1,5 +1,7 @@
""" Test how content sets are handled. """
from selenium.webdriver.common.by import By
from asl_rulebook2.webapp.tests.utils import init_webapp, select_tabbed_page, get_curr_target, \
set_stored_msg_marker, get_last_error_msg, find_child, find_children, wait_for, has_class
from asl_rulebook2.webapp.tests.test_search import do_search
@ -144,7 +146,7 @@ def _select_chapter( chapter_elem ):
find_child( ".title", chapter_elem ).click()
wait_for( 2, lambda: find_child( ".entries", chapter_elem ).is_displayed() )
# make sure all other chapters are collapsed
parent = chapter_elem.find_element_by_xpath( ".." )
parent = chapter_elem.find_element( By.XPATH, ".." )
assert has_class( parent, "accordian" )
for elem in find_children( ".accordian-pane", parent ):
is_expanded = find_child( ".entries", elem ).is_displayed()

@ -19,8 +19,8 @@ def test_doc( webapp, webdriver ):
def get_doc( path ):
# get the specified documentation file
url = "{}/{}".format( webapp.base_url, path )
resp = urllib.request.urlopen( url ).read()
return resp.decode( "utf-8" )
with urllib.request.urlopen( url ) as resp:
return resp.read().decode( "utf-8" )
# test a valid documentation file
resp = get_doc( "/doc/prepare.md" )

@ -36,11 +36,13 @@ def test_footnotes( webdriver, webapp ):
if root.attrib["class"] == "footnote":
# this is a single footnote
footnote_elems = [ root ]
get_content = lambda fnote: fnote.find( "div[@class='content']" ).text
def get_content( fnote ):
    # Extract the text of a single footnote's "content" div (the single-footnote case).
    return fnote.find( "div[@class='content']" ).text
else:
# there are multiple footnotes
footnote_elems = root.findall( "div[@class='footnote']" )
get_content = lambda fnote: "".join( fnote.xpath( "text()" ) )
def get_content( fnote ):
    # Join the bare text nodes of a footnote element (the multiple-footnotes case,
    # where content is not wrapped in a "content" div).
    return "".join( fnote.xpath( "text()" ) )
# extract content from each footnote
for footnote in footnote_elems:
header = footnote.find( "div[@class='header']" )

@ -88,7 +88,8 @@ def test_full_prepare( webapp, webdriver ):
assert zip_file.getinfo( "ASL Rulebook.pdf" ).file_size > 40*1000
for ftype in [ "index", "targets", "chapters", "footnotes" ]:
fname = os.path.join( dname, ftype+".json" )
expected = json.load( open( fname, "r" ) )
with open( fname, "r", encoding="utf-8" ) as fp:
expected = json.load( fp )
fdata = zip_file.read( "ASL Rulebook.{}".format( ftype ) )
assert json.loads( fdata ) == expected

@ -8,6 +8,7 @@ import re
import uuid
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException, TimeoutException
from asl_rulebook2.utils import strip_html
@ -40,9 +41,9 @@ def init_webapp( webapp, webdriver, **options ):
_wait_for_webapp()
# make sure there were no errors or warnings
startup_msgs = json.load(
urllib.request.urlopen( webapp.url_for( "get_startup_msgs" ) )
)
url = webapp.url_for( "get_startup_msgs" )
with urllib.request.urlopen( url ) as resp:
startup_msgs = json.load( resp )
errors = startup_msgs.pop( "error", [] )
errors = [ e[0] for e in errors ]
assert set( errors ) == set( expected_errors )
@ -171,7 +172,7 @@ def find_child( sel, parent=None ):
try:
if parent is None:
parent = _webdriver
return parent.find_element_by_css_selector( sel )
return parent.find_element( By.CSS_SELECTOR, sel )
except NoSuchElementException:
return None
@ -180,7 +181,7 @@ def find_children( sel, parent=None ):
try:
if parent is None:
parent = _webdriver
return parent.find_elements_by_css_selector( sel )
return parent.find_elements( By.CSS_SELECTOR, sel )
except NoSuchElementException:
return None

@ -156,8 +156,9 @@ def _make_webapp():
def is_ready():
"""Try to connect to the webapp server."""
try:
resp = urllib.request.urlopen( app.url_for( "ping" ) ).read()
assert resp == b"pong"
url = app.url_for( "ping" )
with urllib.request.urlopen( url ) as resp:
assert resp.read() == b"pong"
return True
except URLError:
return False
@ -167,9 +168,9 @@ def _make_webapp():
# set up control of the remote webapp server
try:
resp = json.load(
urllib.request.urlopen( app.url_for( "get_control_tests" ) )
)
url = app.url_for( "get_control_tests" )
with urllib.request.urlopen( url ) as resp:
resp = json.load( resp )
except urllib.error.HTTPError as ex:
if ex.code == 404:
raise RuntimeError( "Can't get the test control port - has remote test control been enabled?" ) from ex

@ -1,3 +1,31 @@
body { margin: 0 ; padding:1em ; }
#header {
position: fixed ; top: 1em ; height: 3em ;
width: calc(100% - 2em) ;
}
#header .caption {
display: inline-block ;
border: 1px solid black ; border-radius: 5px ;
padding: 4px 12px ;
font-size: 150% ; font-weight: bold ;
background: #ddd ;
}
#content {
position: fixed ; top: 4.25em ; bottom: 1em ;
width: calc(100% - 2em) ;
overflow: auto ;
}
h3 {
display: inline-block ;
margin: 0 ;
border: 1px solid #888 ; border-radius: 5px ;
padding: 2px 8px ;
color: #444 ; background: #f8f8f8 ;
}
.ruleid { font-family: monospace ; }
.info {
@ -5,4 +33,3 @@
padding-left: 30px ; background: no-repeat url(info.png) ;
font-size: 90% ; font-style: italic ; color: #444 ;
}

@ -10,11 +10,15 @@
<body>
<p> <img src="images/search-heat.png" class="imageZoom" style="float:right;width:40%;margin-left:1em;">
<h2> ASL Rulebook 2
<a href="https://github.com/pacman-ghost/asl-rulebook2/releases" target="_blank"><img src="images/download.png" style="height:0.75em;"></a>
</h2>
<p> Out-of-the-box, this program gives you full-text search over the ASLRB index.
<div id="header">
<div class="caption"> ASL Rulebook 2 </div>
<a href="https://code.pacman-ghost.com/public/asl-rulebook2/releases" target="_blank" style="float:right;"><img src="images/download.png" style="height:2em;"></a>
</div>
<div id="content">
<img src="images/search-heat.png" class="imageZoom" style="float:right;width:40%;margin-left:1em;">
Out of the box, this program gives you full-text search over the ASLRB index.
<p> It will jump to the exact position in the eASLRB PDF for a rule when you click on a search result.
<p> And if the rule has any associated footnotes, these will be shown in a popup.
@ -24,26 +28,26 @@
<br><br>
<p> <img src="images/search-cellar.png" class="imageZoom" style="float:right;width:40%;margin-left:1em;">
<h3> Adding rules for other modules </h3>
<img src="images/search-cellar.png" class="imageZoom" style="float:right;width:40%;margin-left:1em;">
<p> Once you've got the program up and running, you can then think about extending it. It's a lot of work, but the results are insanely cool!
<p> To the right, I've searched for <em>"cellar"</em>, and the program has found results from <em>Red Barricades</em>. The rules for this are referenced in the ASLRB index, but the content is not yet in the MMP eASLRB.
<p> However, I've installed a PDF scan of the rules, plus information about where each rule is within that PDF (a <em>"targets file"</em>), and so when I click on a search result, it seamlessly opens the <em>Red Barricades</em> PDF and jumps to that rule.
<br clear="all"> <br>
<p> <img src="images/search-air-support.png" class="imageZoom" style="float:right;width:40%;margin-left:1em;">
You can also include third-party modules that are not referenced in the ASLRB index. I've also installed a PDF of LFT's <em>Kampfgruppe Scherer</em>, a targets files for it, and also an <em>index file</em>. This last file is also searched, and clicking on a search result takes me directly to the associated rule in the KGS PDF.
You can also include third-party modules that are not referenced in the ASLRB index. I've also installed a PDF of LFT's <em>Kampfgruppe Scherer</em>, a targets file for it, and also an <em>index file</em>. This last file is also searched, and clicking on a search result takes me directly to the associated rule in the KGS PDF.
<br>
<p> I've also added information about chapters in the PDF, so that I can browse through them in the usual way: <br>
<img src="images/chapters-extended.png" class="imageZoom" style="margin:1em 0 0 2em;width:30%;">
<br><br>
<br>
<p> <img src="images/ruleinfo-encircled.png" class="imageZoom" style="float:right;width:30%;margin-left:1em;">
<h3> Showing Q+A and errata </h3>
Q+A and errata can also be included. This is a <em>lot</em> of work, but the results are amazing. If you click on a rule that has Q+A and/or errata associated with it, they will be shown, alongside the rule you're looking for.
<img src="images/ruleinfo-encircled.png" class="imageZoom" style="float:right;width:30%;margin-left:1em;">
<p> Q+A and errata can also be included. This is a <em>lot</em> of work, but the results are amazing. If you click on a rule that has Q+A and/or errata associated with it, they will be shown, alongside the rule you're looking for.
<p> Here, I've searched for <em>"encircled"</em>, and the program has automatically shown Q+A and errata for rule <span class="ruleid">A7.7</span>.
<div class="info"> Note that this errata is actually obsolete, since it's already been incorporated into the current MMP eASLRB, but it's shown here as an example. </div>
<br clear="all">
@ -54,19 +58,21 @@ Going back to the search results, you can see these Q+A entries included in ther
<br clear="all">
<p> <img src="images/user-anno.png" class="imageZoom" style="float:left;width:30%;margin-right:1em;">
<h3> User annotations </h3>
You can also add your own notes to the search engine.
<img src="images/user-anno.png" class="imageZoom" style="float:left;width:30%;margin-right:1em;">
<p> You can also add your own notes to the search engine.
<p> Here, I've added a note about what <em>Majority Squad Type</em> means, and a link back to the Game Squad post that talks about it.
<br clear="all">
<p> <img src="images/asop.png" class="imageZoom" style="float:right;width:40%;margin-left:1em;">
<h3> Advanced Sequence Of Play </h3>
Finally, the ASOP can also be included, with clickable links to each of the referenced rules.
<img src="images/asop.png" class="imageZoom" style="float:right;width:40%;margin-left:1em;">
<p> Finally, the ASOP can also be included, with clickable links to each of the referenced rules.
<p> ASOP entries are also included in search results: <br>
<img src="images/asop-sr.png" class="imageZoom" style="margin:1em 0 0 2em;width:30%;">
</div> <!-- content -->
</body>
<script src="jquery-3.6.0.min.js"></script>

@ -1,6 +1,6 @@
pytest==6.2.2
selenium==3.141.0
grpcio==1.36.1
protobuf==3.15.6
pylint==2.6.2
pytest==7.1.2
selenium==4.2.0
grpcio==1.46.3
grpcio-tools==1.46.3
pylint==2.14.1
pytest-pylint==0.18.0

@ -1,12 +1,12 @@
# python 3.8.7
# python 3.10.4
flask==1.1.2
flask-socketio==5.1.1
eventlet==0.33.0
pyyaml==5.4.1
lxml==4.6.2
markdown==3.3.6
click==7.1.2
flask==2.1.2
flask-socketio==5.2.0
eventlet==0.33.1
pyyaml==6.0
lxml==4.9.0
markdown==3.3.7
click==8.1.3
pdfminer.six==20201018
pikepdf==2.5.2
pdfminer.six==20220524
pikepdf==5.1.3

@ -16,21 +16,22 @@ def parse_requirements( fname ):
"""Parse a requirements file."""
lines = []
fname = os.path.join( os.path.dirname(__file__), fname )
for line in open(fname,"r"):
line = line.strip()
if line == "" or line.startswith("#"):
continue
lines.append( line )
with open( fname, "r", encoding="utf-8" ) as fp:
for line in fp:
line = line.strip()
if line == "" or line.startswith("#"):
continue
lines.append( line )
return lines
# ---------------------------------------------------------------------
setup(
name = "asl_rulebook2",
version = "0.2", # nb: also update constants.py
version = "0.3", # nb: also update constants.py
description = "Search engine for the eASLRB.",
license = "AGPLv3",
url = "https://github.com/pacman-ghost/asl-rulebook2",
url = "https://code.pacman-ghost.com/public/asl-rulebook2",
packages = find_packages(),
install_requires = parse_requirements( "requirements.txt" ),
extras_require = {

Loading…
Cancel
Save