Compare commits

...

68 Commits
v0.1 ... master

Author SHA1 Message Date
Pacman Ghost 75d6678e18 Updated the pylint config. 2 years ago
Pacman Ghost a03f917f05 Updated links to point to pacman-ghost.com. 2 years ago
Pacman Ghost 04d52c85bd Don't warn about missing page numbers for publisher articles. 2 years ago
Pacman Ghost 757e9797dc Updated the version strings. 2 years ago
Pacman Ghost 838d3d1c1e Updated dependencies. 2 years ago
Pacman Ghost 1945c8d4e7 Updated dependencies. 2 years ago
Pacman Ghost a5f931ce51 Updated the README to point to the pre-loaded database. 2 years ago
Pacman Ghost 32b3ebdf5e Strip HTML when setting the browser titlebar. 2 years ago
Pacman Ghost 51ff9e960b Handle quoted words in author names. 2 years ago
Pacman Ghost 11c8f0dced Changed how we scroll to articles already on-screen. 2 years ago
Pacman Ghost 1446d97ac3 Allow the image zoom Javascript to be cached. 2 years ago
Pacman Ghost f080805e77 Fixed some issues when running the test suite against a remote backend server. 2 years ago
Pacman Ghost 49618b9d9c Minor documentation changes. 2 years ago
Pacman Ghost 20f03c2dc1 Added a watermark. 2 years ago
Pacman Ghost 01be3e9880 Minor UI changes. 2 years ago
Pacman Ghost c59e189998 Updated the version strings. 2 years ago
Pacman Ghost 7575d2c217 Use waitress to serve the webapp. 2 years ago
Pacman Ghost 81445487f5 Fixed a problem updating the UI after deleting something. 2 years ago
Pacman Ghost 197a665b10 Made the database reporting tools available in the webapp UI. 2 years ago
Pacman Ghost 189d72725c Update the browser URL after selecting menu items. 3 years ago
Pacman Ghost d81a02317f Got filtering working for standard Select droplists. 3 years ago
Pacman Ghost a0410f5960 Fixed an issue parsing quoted search phrases that contain special characters. 3 years ago
Pacman Ghost 49c608186c Allow publisher articles to have a publication date. 3 years ago
Pacman Ghost 95e662c9f6 Changed how data is transferred between the front- and back-end. 3 years ago
Pacman Ghost fdc287bb61 Allow articles to be associated with a publisher. 3 years ago
Pacman Ghost 41c5d261af Run the Docker container using the caller's UID/GID. 3 years ago
Pacman Ghost db1469023b Updated to a newer version of Flask-SQLAlchemy. 3 years ago
Pacman Ghost 425fdb00e2 Updated to Python 3.8.7. 3 years ago
Pacman Ghost b1c3ee20fb Tightened up how we build the Docker images. 4 years ago
Pacman Ghost d7f5325444 Added a switch to specify which Docker network to use when building the container. 4 years ago
Pacman Ghost 3c50539568 Handle errors when building the Docker containers correctly. 4 years ago
Pacman Ghost 988966531f Tightened up the .dockerignore files. 4 years ago
Pacman Ghost 3cf615c48f Tweaked how we do multi-stage Docker builds. 4 years ago
Pacman Ghost 63fdfbadb0 Don't deploy the debug config file into the containers. 4 years ago
Pacman Ghost a04fa82a14 Updated some comments. 4 years ago
Pacman Ghost 21248450c3 Added an option to run the containers without building them first. 4 years ago
Pacman Ghost 4afe723448 Worked around a pip problem when building the containers. 4 years ago
Pacman Ghost 2f3ceefd30 Shortened the nginx proxy timeouts. 4 years ago
Pacman Ghost d6b61aabc5 Forward navigation key-presses from the search query box to the results pane. 4 years ago
Pacman Ghost b261562dda Show a better error message if the Flask server is not running. 4 years ago
Pacman Ghost fff0d9352b Allow images to be previewed. 4 years ago
Pacman Ghost e25d478f6a Improved how we manage image caching. 4 years ago
Pacman Ghost 898e34535d Added a script to report on images in the database. 4 years ago
Pacman Ghost e6760ce807 Allow the "style" attribute in HTML content. 4 years ago
Pacman Ghost a2045ed398 Tightened up how search aliases are processed. 4 years ago
Pacman Ghost 2488aad352 Made the container port numbers configurable. 4 years ago
Pacman Ghost 83d25e1b1a Allow the user to link to user-defined files. 4 years ago
Pacman Ghost 57c547a220 Moved the search engine config settings out into a separate file. 4 years ago
Pacman Ghost ea9999054b Changed how we enable/disable test mode in the web container. 4 years ago
Pacman Ghost 8ec19b2f94 Force startup initialization to happen early. 4 years ago
Pacman Ghost 6b44b8fd33 Allow author names to be aliased. 4 years ago
Pacman Ghost 59f9c8ccd1 Updated the run-containers script to use getopt. 4 years ago
Pacman Ghost 45dbad6a62 Added a script to find broken links to external documents. 4 years ago
Pacman Ghost fe35b6a8db Added styling for nested italics. 4 years ago
Pacman Ghost 5b1305a1d3 Scroll to an article when clicking on them within a publication. 4 years ago
Pacman Ghost 409678ecce Made article search result headers clickable. 4 years ago
Pacman Ghost fc23bc20e9 Added menu and dialog icons. 4 years ago
Pacman Ghost 922479ff57 Added integration with an eASLRB. 4 years ago
Pacman Ghost 64141166f1 Show startup warnings in the UI. 4 years ago
Pacman Ghost 1c074703e5 Allow fields to be weighted when doing searches. 4 years ago
Pacman Ghost 9d4572d3d8 Allow articles to be rated. 4 years ago
Pacman Ghost f6078b66e0 Minor style changes. 4 years ago
Pacman Ghost b6f8fcd34d Added a seq# to publications. 4 years ago
Pacman Ghost 27603c3eef Don't clean HTML for URL fields. 4 years ago
Pacman Ghost d788301972 Handle a permalink to an unknown author correctly. 4 years ago
Pacman Ghost 9a9768ba55 Show a "more" marker for collapsible lists. 4 years ago
Pacman Ghost c9a431689e Added a script to check for broken links to external documents. 4 years ago
Pacman Ghost aadc5dc0b2 Added permalinks for the menu items. 4 years ago
  1. 14
      .dockerignore
  2. 92
      .pylintrc
  3. 42
      Dockerfile
  4. 64
      README.md
  5. 28
      alembic/versions/21ec84874208_added_article_ratings.py
  6. 28
      alembic/versions/3d58e8ebf8c6_added_a_seq_for_publications.py
  7. 28
      alembic/versions/702eeb219037_allow_articles_to_have_a_publication_.py
  8. 40
      alembic/versions/a33edb7272a2_allow_articles_to_be_associated_with_a_.py
  9. 11
      asl_articles/__init__.py
  10. 147
      asl_articles/articles.py
  11. 31
      asl_articles/authors.py
  12. 61
      asl_articles/config/app.cfg
  13. 42
      asl_articles/config/author-aliases.cfg.example
  14. 2
      asl_articles/config/constants.py
  15. 67
      asl_articles/config/search.cfg
  16. 6
      asl_articles/config/site.cfg.example
  17. 149
      asl_articles/db_report.py
  18. 15
      asl_articles/docs.py
  19. 2
      asl_articles/images.py
  20. 10
      asl_articles/main.py
  21. 7
      asl_articles/models.py
  22. 106
      asl_articles/publications.py
  23. 69
      asl_articles/publishers.py
  24. 14
      asl_articles/scenarios.py
  25. 508
      asl_articles/search.py
  26. 7
      asl_articles/startup.py
  27. 7
      asl_articles/tags.py
  28. 3
      asl_articles/tests/__init__.py
  29. 3
      asl_articles/tests/fixtures/author-aliases.cfg
  30. 31
      asl_articles/tests/fixtures/author-aliases.json
  31. 42
      asl_articles/tests/fixtures/db-report.json
  32. 1
      asl_articles/tests/fixtures/docs/aslj-1.html
  33. 1
      asl_articles/tests/fixtures/docs/aslj-2.html
  34. 1
      asl_articles/tests/fixtures/docs/mmp.html
  35. 11
      asl_articles/tests/fixtures/publisher-article-dates.json
  36. 17
      asl_articles/tests/fixtures/publisher-articles.json
  37. 249
      asl_articles/tests/test_articles.py
  38. 3
      asl_articles/tests/test_authors.py
  39. 235
      asl_articles/tests/test_db_report.py
  40. 74
      asl_articles/tests/test_image_preview.py
  41. 5
      asl_articles/tests/test_import_roar_scenarios.py
  42. 24
      asl_articles/tests/test_publications.py
  43. 14
      asl_articles/tests/test_publishers.py
  44. 3
      asl_articles/tests/test_scenarios.py
  45. 189
      asl_articles/tests/test_search.py
  46. 4
      asl_articles/tests/test_startup.py
  47. 6
      asl_articles/tests/test_tags.py
  48. 65
      asl_articles/tests/utils.py
  49. 52
      asl_articles/utils.py
  50. 39
      conftest.py
  51. BIN
      doc/publication.png
  52. BIN
      doc/publishers.png
  53. BIN
      doc/search.png
  54. BIN
      doc/tag.png
  55. 31
      docker-compose.yml
  56. 1
      docker/config/site.cfg
  57. 10
      requirements-dev.txt
  58. 15
      requirements.txt
  59. 180
      run-containers.sh
  60. 57
      run_server.py
  61. 14
      setup.py
  62. 3
      tools/import_roar_scenarios.py
  63. 11
      web/.dockerignore
  64. 7
      web/Dockerfile
  65. 3
      web/docker/env
  66. 12
      web/docker/nginx-default.conf
  67. 37270
      web/package-lock.json
  68. 6
      web/package.json
  69. BIN
      web/public/favicon.ico
  70. 0
      web/public/images/bullet2.png
  71. BIN
      web/public/images/check-db-links.png
  72. BIN
      web/public/images/edit.png
  73. BIN
      web/public/images/info.png
  74. BIN
      web/public/images/link-error-bullet.png
  75. BIN
      web/public/images/menu/article.png
  76. BIN
      web/public/images/menu/db-report.png
  77. BIN
      web/public/images/menu/publication.png
  78. BIN
      web/public/images/menu/publisher.png
  79. BIN
      web/public/images/menu/publishers.png
  80. BIN
      web/public/images/menu/technique.png
  81. BIN
      web/public/images/menu/tips.png
  82. BIN
      web/public/images/open-link.png
  83. BIN
      web/public/images/rating-star-disabled.png
  84. BIN
      web/public/images/rating-star.png
  85. BIN
      web/public/images/watermark.png
  86. 48
      web/public/jQuery/imageZoom/jquery.imageZoom.css
  87. 195
      web/public/jQuery/imageZoom/jquery.imageZoom.js
  88. BIN
      web/public/jQuery/imageZoom/jquery.imageZoom.png
  89. 23
      web/src/App.css
  90. 280
      web/src/App.js
  91. 1
      web/src/ArticleSearchResult.css
  92. 257
      web/src/ArticleSearchResult.js
  93. 124
      web/src/ArticleSearchResult2.js
  94. 59
      web/src/DataCache.js
  95. 24
      web/src/DbReport.css
  96. 387
      web/src/DbReport.js
  97. 4
      web/src/ModalForm.css
  98. 90
      web/src/PreviewableImage.js
  99. 6
      web/src/PublicationSearchResult.css
  100. 186
      web/src/PublicationSearchResult.js
  101. Some files were not shown because too many files have changed in this diff.

@@ -1 +1,13 @@
web
*
# NOTE: docker-compose doesn't allow spaces after the !'s :-/
!setup.py
!requirements*.txt
!asl_articles/
!run_server.py
!LICENSE.txt
!alembic/
!docker/

@@ -60,17 +60,7 @@ confidence=
# --enable=similarities". If you want to run only the classes checker, but have
# no Warning level messages displayed, use "--disable=all --enable=classes
# --disable=W".
disable=print-statement,
parameter-unpacking,
unpacking-in-except,
old-raise-syntax,
backtick,
long-suffix,
old-ne-operator,
old-octal-literal,
import-star-module-level,
non-ascii-bytes-literal,
raw-checker-failed,
disable=raw-checker-failed,
bad-inline-option,
locally-disabled,
file-ignored,
@@ -78,74 +68,15 @@ disable=print-statement,
useless-suppression,
deprecated-pragma,
use-symbolic-message-instead,
apply-builtin,
basestring-builtin,
buffer-builtin,
cmp-builtin,
coerce-builtin,
execfile-builtin,
file-builtin,
long-builtin,
raw_input-builtin,
reduce-builtin,
standarderror-builtin,
unicode-builtin,
xrange-builtin,
coerce-method,
delslice-method,
getslice-method,
setslice-method,
no-absolute-import,
old-division,
dict-iter-method,
dict-view-method,
next-method-called,
metaclass-assignment,
indexing-exception,
raising-string,
reload-builtin,
oct-method,
hex-method,
nonzero-method,
cmp-method,
input-builtin,
round-builtin,
intern-builtin,
unichr-builtin,
map-builtin-not-iterating,
zip-builtin-not-iterating,
range-builtin-not-iterating,
filter-builtin-not-iterating,
using-cmp-argument,
eq-without-hash,
div-method,
idiv-method,
rdiv-method,
exception-message-attribute,
invalid-str-codec,
sys-max-int,
bad-python3-import,
deprecated-string-function,
deprecated-str-translate-call,
deprecated-itertools-function,
deprecated-types-field,
next-method-defined,
dict-items-not-iterating,
dict-keys-not-iterating,
dict-values-not-iterating,
deprecated-operator-function,
deprecated-urllib-function,
xreadlines-attribute,
deprecated-sys-function,
exception-escape,
comprehension-escape,
bad-whitespace,
invalid-name,
wrong-import-position,
global-statement,
bad-continuation,
too-few-public-methods,
no-else-return
no-else-return,
consider-using-f-string,
use-implicit-booleaness-not-comparison,
duplicate-code,
unnecessary-lambda-assignment,
# Enable the message, report, category or checker with the given id(s). You can
# either give multiple identifier separated by comma (,) or put this option
@@ -240,7 +171,7 @@ contextmanager-decorators=contextlib.contextmanager
# system, and so shouldn't trigger E1101 when accessed. Python regular
# expressions are accepted.
# NOTE: We disable warnings for SQLAlchemy's query.filter/filter_by/join() methods.
generated-members=filter,join
generated-members=filter,join,session.query,session.add,session.commit
# Tells whether missing members accessed in mixin class should be ignored. A
# mixin class is detected if its name ends with "mixin" (case insensitive).
@@ -262,7 +193,7 @@ ignore-on-opaque-inference=yes
# for classes with dynamically set attributes). This supports the use of
# qualified names.
# NOTE: We disable warnings for SQLAlchemy's Column class members e.g. ilike(), asc()
ignored-classes=optparse.Values,thread._local,_thread._local,Column
ignored-classes=optparse.Values,thread._local,_thread._local,scoped_session,Column
# List of module names for which member attributes should not be checked
# (useful for modules/projects where namespaces are manipulated during runtime
@@ -307,13 +238,6 @@ max-line-length=120
# Maximum number of lines in a module.
max-module-lines=1000
# List of optional constructs for which whitespace checking is disabled. `dict-
# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}.
# `trailing-comma` allows a space between comma and closing bracket: (a, ).
# `empty-line` allows space-only lines.
no-space-check=trailing-comma,
dict-separator
# Allow the body of a class to be on the same line as the declaration if body
# contains single statement.
single-line-class-stmt=no

@@ -1,28 +1,33 @@
# NOTE: psycopg2-binary won't install into Alpine because pg_config is missing, and to install that,
# we need a full development environment :-/
# We do a multi-stage build (requires Docker >= 17.05) to install everything, then copy it all
# to the final target image.
# https://github.com/psycopg/psycopg2/issues/684
FROM python:alpine3.7 AS base
FROM rockylinux:8.5 AS base
# update packages and install Python
RUN dnf -y upgrade-minimal && \
dnf install -y python38 && \
dnf clean all
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# install the requirements
FROM base AS build
RUN mkdir /install
# NOTE: psycopg2 needs postgresql-dev and build tools, lxml needs libxslt
RUN apk update && apk add postgresql-dev gcc python3-dev musl-dev && apk add libxslt-dev
WORKDIR /install
COPY requirements.txt /tmp/
# set up a virtualenv
RUN python3 -m venv /opt/venv
ENV PATH="/opt/venv/bin:$PATH"
RUN pip install --upgrade pip
RUN pip install --install-option="--prefix=/install" -r /tmp/requirements.txt
# install the application requirements
COPY requirements.txt /tmp/
RUN pip install -r /tmp/requirements.txt
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
FROM base
# copy the Python requirements
COPY --from=build /install /usr/local
RUN apk --no-cache add libpq
RUN pip install --upgrade pip
RUN apk add libxslt
# copy the virtualenv from the build image
COPY --from=build /opt/venv /opt/venv
ENV PATH="/opt/venv/bin:$PATH"
# install the application
WORKDIR /app
@@ -34,10 +39,15 @@ RUN pip install -e .
ARG ENABLE_TESTS
COPY asl_articles/config/logging.yaml.example asl_articles/config/logging.yaml
COPY docker/config/* asl_articles/config/
RUN rm -f asl_articles/config/debug.cfg
# copy the alembic files (so that users can upgrade their database)
COPY alembic alembic
# NOTE: We set these so that we can update the database outside the container.
ENV UID=$DOCKER_UID
ENV GID=$DOCKER_GID
# launch the web server
EXPOSE 5000
ENV DBCONN undefined

@@ -1,28 +1,30 @@
This program provides a searchable interface to your ASL magazines and their articles.
It is written using React (JavaScript) for the front-end and Flask (Python) for the back-end. For ease of use, it can be run using Docker containers.
### To create a new database
*NOTE: This requires the Python environment to have been set up (see the developer notes below).*
Go to the *alembic/* directory and change the database connection string in *alembic.ini* e.g.
```sqlalchemy.url = sqlite:////home/pacman-ghost/asl-articles.db```
Note that the first three forward slashes belong to the protocol; the fourth is the start of the absolute path to the database.
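For example (illustrative paths only):
```
# absolute path: "sqlite://" plus "/home/pacman-ghost/asl-articles.db" (4 slashes in total)
sqlalchemy.url = sqlite:////home/pacman-ghost/asl-articles.db
# relative path (resolved against the current directory): only 3 slashes in total
sqlalchemy.url = sqlite:///asl-articles.db
```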
Run the following command to create the database (you must be in the *alembic/* directory):
[<img src="doc/publishers.png" height="150">](doc/publishers.png)
&nbsp;
[<img src="doc/publication.png" height="150">](doc/publication.png)
&nbsp;
[<img src="doc/search.png" height="150">](doc/search.png)
&nbsp;
[<img src="doc/tag.png" height="150">](doc/tag.png)
*NOTE: This project integrates with my other [asl-rulebook2](https://code.pacman-ghost.com/public/asl-rulebook2) project. Add a setting to your `site.cfg` e.g.*
```
ASLRB_BASE_URL = http://localhost:5020
```
```alembic upgrade head```
*and references to rules will be converted to clickable links that will open the ASLRB at that rule.*
### To run the application
Go to the project root directory and run the following command:
Get a copy of the pre-loaded database from the release page.
```./run-containers.sh /home/pacman-ghost/asl-articles.db```
Then go to the project root directory and run the following command:
```
./run-containers.sh -d /home/pacman-ghost/asl-articles.db
```
*NOTE: You will need Docker >= 17.05 (for multi-stage builds)*
*NOTE: You will need Docker >= 17.05 (for multi-stage builds)*, and `docker-compose`.
Then open a browser and go to http://localhost:3002
@@ -35,13 +37,15 @@ It is possible to configure publications and their articles so that clicking the
For security reasons, browsers don't allow *file://* links to PDFs; they must be served by a web server. This program supports this, but some things need to be set up first.
When you run the application, specify the top-level directory that contains your PDFs on the command line, e.g.
```./run-containers.sh /home/pacman-ghost/asl-articles.db /home/pacman-ghost/asl-articles-docs/```
```
./run-containers.sh \
-d /home/pacman-ghost/asl-articles.db \
-e /home/pacman-ghost/asl-articles-docs/
```
Then, configure your document paths *relative to that directory*.
For example, say I have my files organized like this:
```
* /home/pacman-ghost/
+-- asl-articles.db
@@ -63,14 +67,16 @@ The application is split over 2 Docker containers, one running a React front-end
##### Setting up the Flask (Python) back-end
Create a *virtualenv*, then go to the *asl_articles/* directory and install the requirements:
```pip install -e .[dev]```
```
pip install -e .[dev]
```
Copy *config/site.cfg.example* to *config/site.cfg*, and update it to point to your database.
Then run the server:
```./run_server.py```
```
./run_server.py
```
You can test if things are working by opening a browser and going to http://localhost:5000/ping
@@ -79,9 +85,11 @@ You can test if things are working by opening a browser and going to http://loca
##### Setting up the React front-end
Go to the *web/* directory and install the requirements:
```npm install```
```
npm install
```
Then run the server:
```npm start```
```
npm start
```

@@ -0,0 +1,28 @@
"""Added article ratings.
Revision ID: 21ec84874208
Revises: 3d58e8ebf8c6
Create Date: 2020-03-19 01:10:12.194485
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '21ec84874208'
down_revision = '3d58e8ebf8c6'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('article', sa.Column('article_rating', sa.Integer(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('article', 'article_rating')
# ### end Alembic commands ###

@@ -0,0 +1,28 @@
"""Added a seq# for publications.
Revision ID: 3d58e8ebf8c6
Revises: 41cfc117c809
Create Date: 2020-03-18 10:26:25.801673
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '3d58e8ebf8c6'
down_revision = '41cfc117c809'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('publication', sa.Column('pub_seqno', sa.Integer(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('publication', 'pub_seqno')
# ### end Alembic commands ###

@@ -0,0 +1,28 @@
"""Allow articles to have a publication date.
Revision ID: 702eeb219037
Revises: a33edb7272a2
Create Date: 2021-11-16 20:41:37.454305
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '702eeb219037'
down_revision = 'a33edb7272a2'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('article', sa.Column('article_date', sa.String(length=100), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('article', 'article_date')
# ### end Alembic commands ###

@@ -0,0 +1,40 @@
"""Allow articles to be associated with a publisher.
Revision ID: a33edb7272a2
Revises: 21ec84874208
Create Date: 2021-10-22 20:10:50.440849
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'a33edb7272a2'
down_revision = '21ec84874208'
branch_labels = None
depends_on = None
from alembic import context
is_sqlite = context.config.get_main_option( "sqlalchemy.url" ).startswith( "sqlite://" )
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('article', sa.Column('publ_id', sa.Integer(), nullable=True))
if is_sqlite:
op.execute( "PRAGMA foreign_keys = off" ) # nb: stop cascading deletes
with op.batch_alter_table('article') as batch_op:
batch_op.create_foreign_key('fk_article_publisher', 'publisher', ['publ_id'], ['publ_id'], ondelete='CASCADE')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
if is_sqlite:
op.execute( "PRAGMA foreign_keys = off" ) # nb: stop cascading deletes
with op.batch_alter_table('article') as batch_op:
batch_op.drop_constraint('fk_article_publisher', type_='foreignkey')
op.drop_column('article', 'publ_id')
# ### end Alembic commands ###

@@ -40,8 +40,8 @@ def _on_startup():
return
# initialize the search index
_logger = logging.getLogger( "startup" )
asl_articles.search.init_search( db.session, _logger )
logger = logging.getLogger( "startup" )
asl_articles.search.init_search( db.session, logger )
# ---------------------------------------------------------------------
@@ -75,7 +75,7 @@ _load_config( _cfg, _fname, "Debug" )
# initialize logging
_fname = os.path.join( config_dir, "logging.yaml" )
if os.path.isfile( _fname ):
with open( _fname, "r" ) as fp:
with open( _fname, "r", encoding="utf-8" ) as fp:
logging.config.dictConfig( yaml.safe_load( fp ) )
else:
# stop Flask from logging every request :-/
@@ -86,9 +86,7 @@ base_dir = os.path.join( BASE_DIR, ".." )
app = Flask( __name__ )
app.config.update( _cfg )
# connect to the database
# NOTE: We assume that this web server will only be handling a single user. If we ever have
# multiple concurrent users, we will need to change to per-session database connections.
# initialize the database connection
app.config[ "_IS_CONTAINER" ] = _cfg.get( "IS_CONTAINER" )
if _cfg.get( "IS_CONTAINER" ):
# if we are running in a container, the database must be specified in an env variable e.g.
@@ -114,6 +112,7 @@ import asl_articles.scenarios #pylint: disable=cyclic-import
import asl_articles.images #pylint: disable=cyclic-import
import asl_articles.tags #pylint: disable=cyclic-import
import asl_articles.docs #pylint: disable=cyclic-import
import asl_articles.db_report #pylint: disable=cyclic-import
import asl_articles.utils #pylint: disable=cyclic-import
# initialize

@@ -9,19 +9,18 @@ from sqlalchemy.sql.expression import func
from asl_articles import app, db
from asl_articles.models import Article, Author, ArticleAuthor, Scenario, ArticleScenario, ArticleImage
from asl_articles.models import Publication
from asl_articles.authors import do_get_authors
from asl_articles.scenarios import do_get_scenarios
from asl_articles.tags import do_get_tags
from asl_articles.authors import get_author_vals
from asl_articles.scenarios import get_scenario_vals
import asl_articles.publications
import asl_articles.publishers
from asl_articles import search
from asl_articles.utils import get_request_args, clean_request_args, clean_tags, encode_tags, decode_tags, \
apply_attrs, make_ok_response
_logger = logging.getLogger( "db" )
_FIELD_NAMES = [ "*article_title", "article_subtitle", "article_snippet", "article_pageno",
"article_url", "article_tags", "pub_id"
_FIELD_NAMES = [ "*article_title", "article_subtitle", "article_date", "article_snippet", "article_pageno",
"article_url", "article_tags", "pub_id", "publ_id"
]
# ---------------------------------------------------------------------
@@ -34,9 +33,10 @@ def get_article( article_id ):
if not article:
abort( 404 )
_logger.debug( "- %s", article )
return jsonify( get_article_vals( article ) )
deep = request.args.get( "deep" )
return jsonify( get_article_vals( article, deep ) )
def get_article_vals( article, add_type=False ):
def get_article_vals( article, deep ):
"""Extract public fields from an Article record."""
authors = sorted( article.article_authors,
key = lambda a: a.seq_no
@@ -45,20 +45,29 @@ def get_article_vals( article, add_type=False ):
key = lambda a: a.seq_no
)
vals = {
"_type": "article",
"article_id": article.article_id,
"article_title": article.article_title,
"article_subtitle": article.article_subtitle,
"article_image_id": article.article_id if article.article_image else None,
"article_authors": [ a.author_id for a in authors ],
"article_authors": [ get_author_vals( a.parent_author ) for a in authors ],
"article_date": article.article_date,
"article_snippet": article.article_snippet,
"article_pageno": article.article_pageno,
"article_url": article.article_url,
"article_scenarios": [ s.scenario_id for s in scenarios ],
"article_scenarios": [ get_scenario_vals( s.parent_scenario ) for s in scenarios ],
"article_tags": decode_tags( article.article_tags ),
"article_rating": article.article_rating,
"pub_id": article.pub_id,
"publ_id": article.publ_id,
}
if add_type:
vals[ "type" ] = "article"
if deep:
vals["_parent_pub"] = asl_articles.publications.get_publication_vals(
article.parent_pub, False, False
) if article.parent_pub else None
vals["_parent_publ"] = asl_articles.publishers.get_publisher_vals(
article.parent_publ, False, False
) if article.parent_publ else None
return vals
def get_article_sort_key( article ):
@@ -78,38 +87,31 @@ def create_article():
log = ( _logger, "Create article:" )
)
warnings = []
updated = clean_request_args( vals, _FIELD_NAMES, warnings, _logger )
clean_request_args( vals, _FIELD_NAMES, warnings, _logger )
# NOTE: Tags are stored in the database using \n as a separator, so we need to encode *after* cleaning them.
cleaned_tags = clean_tags( vals.get("article_tags"), warnings )
vals[ "article_tags" ] = encode_tags( cleaned_tags )
if cleaned_tags != vals.get( "article_tags" ):
updated[ "article_tags" ] = decode_tags( vals["article_tags"] )
# create the new article
vals[ "time_created" ] = datetime.datetime.now()
if not vals.get( "publ_id" ):
vals.pop( "article_date", None )
article = Article( **vals )
db.session.add( article )
db.session.flush()
new_article_id = article.article_id
_set_seqno( article, article.pub_id )
_save_authors( article, updated )
_save_scenarios( article, updated )
_save_image( article, updated )
_save_authors( article )
_save_scenarios( article )
_save_image( article )
db.session.commit()
_logger.debug( "- New ID: %d", new_article_id )
search.add_or_update_article( None, article )
search.add_or_update_article( None, article, None )
# generate the response
extras = { "article_id": new_article_id }
if request.args.get( "list" ):
extras[ "authors" ] = do_get_authors()
extras[ "scenarios" ] = do_get_scenarios()
extras[ "tags" ] = do_get_tags()
if article.pub_id:
pub = Publication.query.get( article.pub_id )
extras[ "_publication" ] = asl_articles.publications.get_publication_vals( pub, True )
return make_ok_response( updated=updated, extras=extras, warnings=warnings )
vals = get_article_vals( article, True )
return make_ok_response( record=vals, warnings=warnings )
def _set_seqno( article, pub_id ):
"""Set an article's seq#."""
@@ -121,7 +123,7 @@ def _set_seqno( article, pub_id ):
else:
article.article_seqno = None
def _save_authors( article, updated_fields ):
def _save_authors( article ):
"""Save the article's authors."""
# delete the existing article-author rows
@@ -131,8 +133,6 @@ def _save_authors( article, updated_fields ):
# add the article-author rows
authors = request.json.get( "article_authors", [] )
author_ids = []
new_authors = False
for seq_no,author in enumerate( authors ):
if isinstance( author, int ):
# this is an existing author
@@ -145,19 +145,12 @@
db.session.add( author )
db.session.flush()
author_id = author.author_id
new_authors = True
_logger.debug( "Created new author \"%s\": id=%d", author, author_id )
db.session.add(
ArticleAuthor( seq_no=seq_no, article_id=article.article_id, author_id=author_id )
)
author_ids.append( author_id )
# check if we created any new authors
if new_authors:
# yup - let the caller know about them
updated_fields[ "article_authors"] = author_ids
def _save_scenarios( article, updated_fields ):
def _save_scenarios( article ):
"""Save the article's scenarios."""
# delete the existing article-scenario rows
@@ -167,8 +160,6 @@ def _save_scenarios( article, updated_fields ):
# add the article-scenario rows
scenarios = request.json.get( "article_scenarios", [] )
scenario_ids = []
new_scenarios = False
for seq_no,scenario in enumerate( scenarios ):
if isinstance( scenario, int ):
# this is an existing scenario
@@ -181,19 +172,12 @@
db.session.add( new_scenario )
db.session.flush()
scenario_id = new_scenario.scenario_id
new_scenarios = True
_logger.debug( "Created new scenario \"%s [%s]\": id=%d", scenario[1], scenario[0], scenario_id )
db.session.add(
ArticleScenario( seq_no=seq_no, article_id=article.article_id, scenario_id=scenario_id )
)
scenario_ids.append( scenario_id )
# check if we created any new scenarios
if new_scenarios:
# yup - let the caller know about them
updated_fields[ "article_scenarios"] = scenario_ids
def _save_image( article, updated ):
def _save_image( article ):
"""Save the article's image."""
# check if a new image was provided
@@ -205,7 +189,7 @@ def _save_image( article, updated ):
ArticleImage.query.filter( ArticleImage.article_id == article.article_id ).delete()
if image_data == "{remove}":
# NOTE: The front-end sends this if it wants the article to have no image.
updated[ "article_image_id" ] = None
article.article_image_id = None
return
# add the new image to the database
@@ -215,7 +199,6 @@ def _save_image( article, updated ):
db.session.add( img )
db.session.flush()
_logger.debug( "Created new image: %s, #bytes=%d", fname, len(image_data) )
updated[ "article_image_id" ] = article.article_id
# ---------------------------------------------------------------------
@@ -229,44 +212,53 @@ def update_article():
log = ( _logger, "Update article: id={}".format( article_id ) )
)
warnings = []
updated = clean_request_args( vals, _FIELD_NAMES, warnings, _logger )
clean_request_args( vals, _FIELD_NAMES, warnings, _logger )
# NOTE: Tags are stored in the database using \n as a separator, so we need to encode *after* cleaning them.
cleaned_tags = clean_tags( vals.get("article_tags"), warnings )
vals[ "article_tags" ] = encode_tags( cleaned_tags )
if cleaned_tags != vals.get( "article_tags" ):
updated[ "article_tags" ] = decode_tags( vals["article_tags"] )
# update the article
article = Article.query.get( article_id )
if not article:
abort( 404 )
orig_pub = Publication.query.get( article.pub_id ) if article.pub_id else None
if vals["pub_id"] != article.pub_id:
_set_seqno( article, vals["pub_id"] )
vals[ "time_updated" ] = datetime.datetime.now()
apply_attrs( article, vals )
_save_authors( article, updated )
_save_scenarios( article, updated )
_save_image( article, updated )
if not vals.get( "publ_id" ):
article.article_date = None
_save_authors( article )
_save_scenarios( article )
_save_image( article )
db.session.commit()
search.add_or_update_article( None, article )
search.add_or_update_article( None, article, None )
# generate the response
extras = {}
if request.args.get( "list" ):
extras[ "authors" ] = do_get_authors()
extras[ "scenarios" ] = do_get_scenarios()
extras[ "tags" ] = do_get_tags()
pubs = []
if orig_pub and orig_pub.pub_id != article.pub_id:
pubs.append( asl_articles.publications.get_publication_vals( orig_pub, True ) )
if article.pub_id:
pub = Publication.query.get( article.pub_id )
pubs.append( asl_articles.publications.get_publication_vals( pub, True ) )
if pubs:
extras[ "_publications" ] = pubs
return make_ok_response( updated=updated, extras=extras, warnings=warnings )
vals = get_article_vals( article, True )
return make_ok_response( record=vals, warnings=warnings )
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@app.route( "/article/update-rating", methods=["POST"] )
def update_article_rating():
"""Update an article's rating."""
# parse the input
article_id = request.json[ "article_id" ]
new_rating = int( request.json[ "rating" ] )
if new_rating < 0 or new_rating > 3:
raise ValueError( "Invalid rating." )
# update the article's rating
article = Article.query.get( article_id )
if not article:
abort( 404 )
article.article_rating = new_rating
db.session.commit()
search.add_or_update_article( None, article, None )
return "OK"
# ---------------------------------------------------------------------
@@ -287,11 +279,4 @@ def delete_article( article_id ):
search.delete_articles( [ article ] )
# generate the response
extras = {}
if request.args.get( "list" ):
extras[ "authors" ] = do_get_authors()
extras[ "tags" ] = do_get_tags()
if article.pub_id:
pub = Publication.query.get( article.pub_id )
extras[ "_publication" ] = asl_articles.publications.get_publication_vals( pub, True )
return make_ok_response( extras=extras )
return make_ok_response()

@@ -1,27 +1,38 @@
""" Handle author requests. """
from flask import jsonify
import logging
from flask import jsonify, abort
from asl_articles import app
from asl_articles.models import Author
_logger = logging.getLogger( "db" )
# ---------------------------------------------------------------------
@app.route( "/authors" )
def get_authors():
"""Get all authors."""
return jsonify( do_get_authors() )
return jsonify( {
author.author_id: get_author_vals( author )
for author in Author.query.all()
} )
def do_get_authors():
"""Get all authors."""
# ---------------------------------------------------------------------
# get all the authors
return {
r.author_id: _get_author_vals(r)
for r in Author.query #pylint: disable=not-an-iterable
}
@app.route( "/author/<author_id>" )
def get_author( author_id ):
"""Get an author."""
_logger.debug( "Get author: id=%s", author_id )
author = Author.query.get( author_id )
if not author:
abort( 404 )
vals = get_author_vals( author )
_logger.debug( "- %s", author )
return jsonify( vals )
def _get_author_vals( author ):
def get_author_vals( author ):
"""Extract public fields from an Author record."""
return {
"author_id": author.author_id,

@@ -1,61 +1,8 @@
[System]
; Allowed HTML tags and attributes. If not specified, the lxml defaults will be used:
; https://github.com/lxml/lxml/blob/master/src/lxml/html/defs.py
;HTML_ATTR_WHITELIST =
; Allowed HTML tags (default = allow all).
;HTML_TAG_WHITELIST =
[Search aliases]
; This section defines search term aliases.
; For example, the entry "a = b ; c" means that searching for "a" will become "( a OR b OR c )".
; NOTE: Searching for "b" or "c" will be unaffected.
latw = atmm ; atr ; baz ; mol-p ; mol-projector ; piat ; pf ; pfk ; psk
sw = support weapon ; lmg ; mmg ; hmg ; mtr ; mortar ; dc ; ft ; radio ; phone ; rcl ; atmm ; atr ; baz ; mol-p ; mol-projector ; piat ; pf ; pfk ; psk
fortifications = cave ; a-t ditch ; foxhole ; sangar ; trench ; bunker ; minefield ; mines ; booby trap ; panji ; pillbox ; roadblock ; tetrahedron ; wire
vehicles = tank ; halftrack ; half-track ; jeep ; carrier
illumination = starshell ; illuminating round ; trip flare
[Search aliases 2]
; This section defines search term aliases.
; For example, the entry "a = b = c" means that searching for any of "a" or "b" or "c" will all become "( a OR b OR c )".
asl = Advanced Squad Leader
mmp = Multi-Man Publishing = Multiman Publishing
ah = Avalon Hill
vftt = View From The Trenches
dftb = Dispatches From The Bunker
ch = Critical Hit
aslj = ASL Journal
rb = red barricades
votg = valor of the guards
kgp = kampfgrupper peiper
kgs = kampfgrupper scherer
brt = br:t = blood reef tarawa
pb = pegasus bridge
dc = demo charge
ft = flamethrower
baz = bazooka
pf = panzerfaust
psk = panzershreck
wp = white phosphorous
mol = molotov cocktail
ovr = overrun
cc = close combat
thh = tank-hunter hero
scw = shaped-charge weapon
; NOTE: We can't define "sw" here since we've defined it above.
; sw = support weapon
mg = machinegun = machine gun
ammo = ammunition
lc = landing craft
ht = halftrack
wa = wall advantage
hob = heat of battle
cg = campaign game
firelane = fire-lane = fire lane
firegroup = fire-group = fire group
armor = armour
humor = humour
; Allowed HTML attributes. If not specified, the lxml defaults will be used:
; https://github.com/lxml/lxml/blob/master/src/lxml/html/defs.py
HTML_ATTR_WHITELIST = style

@@ -0,0 +1,42 @@
[Author aliases]
Andrew Hershey = Andrew H. Hershey
Andy Goldin = CPT Andy Goldin
Bob Medrow = Robert Medrow
Bruce Bakken = Bruce E. Bakken
Carl Fago = Carl D. Fago
Charlie Kibler = Charles Kibler
Chas Smith = Captain Chas Smith
Chris Doary = Chris "Clouseaux" Doary
Derek Tocher = Derek A. Tocher
Ed Beekman = Edward Beekman
Jeff Shields = Jeffrey Shields
Joe Suchar = Joseph Suchar
John Slotwinski = Dr. John Slotwinski
Jon Mishcon = M. J. Mishcon = M. Johnathon Mishcon
JR Van Mechelen = Jonathan Van Mechelen
Mark Nixon = Mark C. Nixon
Mark Walz = Mark S. Walz
Matt Cicero = Matthew Cicero
Matt Shostak = Matthew Shostak
Michael Dorosh = Michael A. Dorosh
Mike Clay = Dr. Michael Clay
Mike Conklin = Michael Conklin = Michael "6+3" Conklin
Mike Licari = Michael Licari = Michael J. Licari
Paul Venard = Paul J. Venard
Ray Tapio = Raymond J. Tapio
Rex Martin = Rex A. Martin
Robert Seulowitz = Dr. Rob Seulowitz
Robert Walden = Bob Walden
Rob Modarelli = Robert Modarelli = Captain Robert Modarelli III
Roy Connelly = Roy W. Connelly
Russ Bunten = Russell Bunten
Sam Rockwell = Samuel Rockwell
Scott Jackson = Scott "Stonewall" Jackson
Scott Thompson = Scott E. Thompson
Seth Fancher = Seth W. Fancher
Steve Linton = Steven Linton
Steve Pleva = Steven J. Pleva = Steve "Gor Gor" Pleva
Steve Swann = Steve C. Swann = Steven Swann = Steven C. Swann
Tom Huntington = Thomas Huntington
Trevor Edwards = Trev Edwards

@@ -3,7 +3,7 @@
import os
APP_NAME = "ASL Articles"
APP_VERSION = "v0.1" # nb: also update setup.py
APP_VERSION = "v1.1" # nb: also update setup.py
APP_DESCRIPTION = "Searchable index of ASL articles."
BASE_DIR = os.path.abspath( os.path.join( os.path.split(__file__)[0], ".." ) )

@@ -0,0 +1,67 @@
[Search weights]
; This section defines the relative weights of the searchable fields (see _SEARCHABLE_COL_NAMES).
; Each hit in a field scores 1 point, unless specified otherwise here.
tags = 10
name = 5
name2 = 3
authors = 5
[Search aliases]
; This section defines search term aliases.
; For example, the entry "a = b ; c" means that searching for "a" will become "( a OR b OR c )".
; NOTE: Searching for "b" or "c" will be unaffected.
latw = atmm ; atr ; baz ; mol-p ; mol-projector ; piat ; pf ; pfk ; psk
sw = support weapon ; lmg ; mmg ; hmg ; mtr ; mortar ; dc ; ft ; radio ; phone ; rcl ; atmm ; atr ; baz ; mol-p ; mol-projector ; piat ; pf ; pfk ; psk
fortifications = cave ; a-t ditch ; foxhole ; sangar ; trench ; bunker ; minefield ; mines ; booby trap ; panji ; pillbox ; roadblock ; tetrahedron ; wire
entrenchments = foxhole ; trench ; ditch
vehicles = tank ; halftrack ; half-track ; jeep ; carrier
illumination = starshell ; illuminating round ; trip flare
[Search aliases 2]
; This section defines search term aliases.
; For example, the entry "a = b = c" means that searching for any of "a" or "b" or "c" will all become "( a OR b OR c )".
asl = Advanced Squad Leader
mmp = Multi-Man Publishing = Multiman Publishing
ah = Avalon Hill
vftt = View From The Trenches
dftb = Dispatches From The Bunker
ch = Critical Hit
aslj = ASL Journal
rb = red barricades
votg = valor of the guards
kgp = kampfgrupper peiper
kgs = kampfgrupper scherer
brt = br:t = blood reef tarawa
pb = pegasus bridge
dc = demo charge
ft = flamethrower
baz = bazooka
pf = panzerfaust
psk = panzershreck
wp = white phosphorous
mol = molotov cocktail
ovr = overrun
cc = close combat
thh = tank-hunter hero
scw = shaped-charge weapon
; NOTE: We can't define "sw" here since we've defined it above.
; sw = support weapon
mg = machinegun = machine gun
ammo = ammunition
lc = landing craft
ht = halftrack
wa = wall advantage
hob = heat of battle
cg = campaign game
pbm = pbem
firelane = fire-lane = fire lane
firegroup = fire-group = fire group
armor = armour
humor = humour
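As a rough editorial sketch (not the project's actual implementation, which lives in *asl_articles/search.py* and is not shown in this diff), the weights and aliases above could be applied along these lines:
```python
# NOTE: Editorial sketch only - it illustrates the semantics of the config above.
FIELD_WEIGHTS = { "tags": 10, "name": 5, "name2": 3, "authors": 5 }
ALIASES = { "latw": [ "atmm", "atr", "baz", "mol-p", "mol-projector", "piat", "pf", "pfk", "psk" ] }

def expand_term( term ):
    """Expand a search term using its aliases e.g. "latw" => "( latw OR atmm OR ... )"."""
    variants = [ term ] + ALIASES.get( term, [] )
    if len( variants ) == 1:
        return term
    return "( {} )".format( " OR ".join( variants ) )

def score_result( hits_per_field ):
    """Score a search result: each hit scores 1 point, unless the field is weighted."""
    return sum(
        nhits * FIELD_WEIGHTS.get( field, 1 )
        for field, nhits in hits_per_field.items()
    )
```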

@@ -7,3 +7,9 @@ DB_CONN_STRING = ...
; Base directory for external documents.
EXTERNAL_DOCS_BASEDIR = ...
; Base directory for user files.
USER_FILES_BASEDIR = ...
; Base URL for the eASLRB (e.g. http://localhost:5020).
ASLRB_BASE_URL = ...

@@ -0,0 +1,149 @@
""" Generate the database report. """
import urllib.request
import urllib.error
import hashlib
from collections import defaultdict
from flask import request, jsonify, abort
from asl_articles import app, db
# ---------------------------------------------------------------------
@app.route( "/db-report/row-counts" )
def get_db_row_counts():
"""Get the database row counts."""
results = {}
for table_name in [
"publisher", "publication", "article", "author",
"publisher_image", "publication_image", "article_image",
"scenario"
]:
query = db.engine.execute( "SELECT count(*) FROM {}".format( table_name ) )
results[ table_name ] = query.scalar()
return jsonify( results )
# ---------------------------------------------------------------------
@app.route( "/db-report/links" )
def get_db_links():
"""Get all links in the database."""
# initialize
results = {}
def find_db_links( table_name, col_names ):
links = []
query = db.engine.execute( "SELECT * FROM {}".format( table_name ) )
for row in query:
url = row[ col_names[1] ]
if not url:
continue
obj_id = row[ col_names[0] ]
name = col_names[2]( row ) if callable( col_names[2] ) else row[ col_names[2] ]
links.append( [ obj_id, name, url ] )
results[ table_name ] = links
# find all links
find_db_links( "publisher", [
"publ_id", "publ_url", "publ_name"
] )
find_db_links( "publication", [
"pub_id", "pub_url", _get_pub_name
] )
find_db_links( "article", [
"article_id", "article_url", "article_title"
] )
return jsonify( results )
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@app.route( "/db-report/check-link", methods=["POST"] )
def check_db_link():
"""Check if a link appears to be working."""
url = request.args.get( "url" )
try:
req = urllib.request.Request( url, method="HEAD" )
with urllib.request.urlopen( req ) as resp:
resp_code = resp.code
except urllib.error.URLError as ex:
resp_code = getattr( ex, "code", None )
if not resp_code:
resp_code = 400
if resp_code != 200:
abort( resp_code )
return "ok"
# ---------------------------------------------------------------------
@app.route( "/db-report/images" )
def get_db_images():
"""Analyze the images stored in the database."""
# initialize
results = {}
image_hashes = defaultdict( list )
def find_images( table_name, col_names, get_name ):
# find rows in the specified table that have images
sql = "SELECT {cols}, image_data" \
" FROM {table}_image LEFT JOIN {table}" \
" ON {table}_image.{id_col} = {table}.{id_col}".format(
cols = ",".join( "{}.{}".format( table_name, c ) for c in col_names ),
table = table_name,
id_col = col_names[0]
)
rows = [
dict( row )
for row in db.engine.execute( sql )
]
# save the image hashes
for row in rows:
image_hash = hashlib.md5( row["image_data"] ).hexdigest()
image_hashes[ image_hash ].append( [
table_name, row[col_names[0]], get_name(row)
] )
# save the image sizes
image_sizes = [
[ len(row["image_data"]), row[col_names[0]], get_name(row) ]
for row in rows
]
image_sizes.sort( key = lambda r: r[0], reverse=True )
results[ table_name ] = image_sizes
# look for images in each table
find_images( "publisher",
[ "publ_id", "publ_name" ],
lambda row: row["publ_name"]
)
find_images( "publication",
[ "pub_id", "pub_name", "pub_edition" ],
_get_pub_name
)
find_images( "article",
[ "article_id", "article_title" ],
lambda row: row["article_title"]
)
# look for duplicate images
results["duplicates"] = {}
for image_hash, images in image_hashes.items():
if len(images) == 1:
continue
results["duplicates"][ image_hash ] = images
return results
# ---------------------------------------------------------------------
def _get_pub_name( row ):
"""Get a publication's display name."""
name = row["pub_name"]
if row["pub_edition"]:
name += " ({})".format( row["pub_edition"] )
return name

@@ -20,3 +20,18 @@ def get_external_doc( path ):
fname = os.path.join( os.environ["EXTERNAL_DOCS_BASEDIR"], path )
abort( 404, "Can't find file: {}".format( fname ) )
return send_from_directory( base_dir, path )
# ---------------------------------------------------------------------
@app.route( "/user-files/<path:path>" )
def get_user_file( path ):
"""Return a user-defined file."""
base_dir = app.config.get( "USER_FILES_BASEDIR" )
if not base_dir:
abort( 404, "USER_FILES_BASEDIR not configured." )
fname = os.path.join( base_dir, path )
if not os.path.isfile( fname ):
if app.config["_IS_CONTAINER"]:
fname = os.path.join( os.environ["USER_FILES_BASEDIR"], path )
abort( 404, "Can't find file: {}".format( fname ) )
return send_from_directory( base_dir, path )

@@ -21,5 +21,5 @@ def get_image( image_type, image_id ):
abort( 404 )
return send_file(
io.BytesIO( img.image_data ),
attachment_filename = img.image_filename # nb: so that Flask can set the MIME type
download_name = img.image_filename # nb: so that Flask can set the MIME type
)

@@ -1,7 +1,5 @@
""" Main handlers. """
from flask import request
from asl_articles import app
# ---------------------------------------------------------------------
@@ -10,11 +8,3 @@ from asl_articles import app
def ping():
"""Let the caller know we're alive (for testing porpoises)."""
return "pong"
# ---------------------------------------------------------------------
@app.route( "/shutdown" )
def shutdown():
"""Shutdown the server (for testing porpoises)."""
request.environ.get( "werkzeug.server.shutdown" )()
return ""

@@ -23,6 +23,7 @@ class Publisher( db.Model ):
#
publ_image = db.relationship( "PublisherImage", backref="parent_publ", passive_deletes=True )
publications = db.relationship( "Publication", backref="parent_publ", passive_deletes=True )
articles = db.relationship( "Article", backref="parent_publ", passive_deletes=True )
def __repr__( self ):
return "<Publisher:{}|{}>".format( self.publ_id, self.publ_name )
@@ -38,6 +39,7 @@ class Publication( db.Model ):
pub_date = db.Column( db.String(100) ) # nb: this is just a display string
pub_description = db.Column( db.String(1000) )
pub_url = db.Column( db.String(500) )
pub_seqno = db.Column( db.Integer )
pub_tags = db.Column( db.String(1000) )
publ_id = db.Column( db.Integer,
db.ForeignKey( Publisher.__table__.c.publ_id, ondelete="CASCADE" )
@@ -61,14 +63,19 @@ class Article( db.Model ):
article_id = db.Column( db.Integer, primary_key=True )
article_title = db.Column( db.String(200), nullable=False )
article_subtitle = db.Column( db.String(200) )
article_date = db.Column( db.String(100) ) # nb: this is just a display string
article_snippet = db.Column( db.String(5000) )
article_seqno = db.Column( db.Integer )
article_pageno = db.Column( db.String(20) )
article_url = db.Column( db.String(500) )
article_tags = db.Column( db.String(1000) )
article_rating = db.Column( db.Integer )
pub_id = db.Column( db.Integer,
db.ForeignKey( Publication.__table__.c.pub_id, ondelete="CASCADE" )
)
publ_id = db.Column( db.Integer,
db.ForeignKey( Publisher.__table__.c.publ_id, ondelete="CASCADE" )
)
# NOTE: time_created should be non-nullable, but getting this to work on both SQLite and Postgres
# is more trouble than it's worth :-/
time_created = db.Column( db.TIMESTAMP(timezone=True) )

@@ -5,11 +5,12 @@ import base64
import logging
from flask import request, jsonify, abort
from sqlalchemy.sql.expression import func
from asl_articles import app, db
from asl_articles.models import Publication, PublicationImage, Article
from asl_articles.articles import get_article_vals, get_article_sort_key
from asl_articles.tags import do_get_tags
import asl_articles.publishers
from asl_articles import search
from asl_articles.utils import get_request_args, clean_request_args, clean_tags, encode_tags, decode_tags, \
apply_attrs, make_ok_response
@@ -23,14 +24,10 @@ _FIELD_NAMES = [ "*pub_name", "pub_edition", "pub_description", "pub_date", "pub
@app.route( "/publications" )
def get_publications():
"""Get all publications."""
return jsonify( do_get_publications() )
def do_get_publications():
"""Get all publications."""
# NOTE: The front-end maintains a cache of the publications, so as a convenience,
# we return the current list as part of the response to a create/update/delete operation.
results = Publication.query.all()
return { r.pub_id: get_publication_vals(r,False) for r in results }
return jsonify( {
pub.pub_id: get_publication_vals( pub, False, False )
for pub in Publication.query.all()
} )
# ---------------------------------------------------------------------
@@ -41,22 +38,27 @@ def get_publication( pub_id ):
pub = Publication.query.get( pub_id )
if not pub:
abort( 404 )
vals = get_publication_vals( pub, False )
vals = get_publication_vals( pub,
request.args.get( "include_articles" ),
request.args.get( "deep" )
)
# include the number of associated articles
query = Article.query.filter_by( pub_id = pub_id )
vals[ "nArticles" ] = query.count()
_logger.debug( "- %s ; #articles=%d", pub, vals["nArticles"] )
return jsonify( vals )
def get_publication_vals( pub, include_articles, add_type=False ):
def get_publication_vals( pub, include_articles, deep ):
"""Extract public fields from a Publication record."""
vals = {
"_type": "publication",
"pub_id": pub.pub_id,
"pub_name": pub.pub_name,
"pub_edition": pub.pub_edition,
"pub_date": pub.pub_date,
"pub_description": pub.pub_description,
"pub_url": pub.pub_url,
"pub_seqno": pub.pub_seqno,
"pub_image_id": pub.pub_id if pub.pub_image else None,
"pub_tags": decode_tags( pub.pub_tags ),
"publ_id": pub.publ_id,
@@ -64,15 +66,26 @@ def get_publication_vals( pub, include_articles, add_type=False ):
}
if include_articles:
articles = sorted( pub.articles, key=get_article_sort_key )
vals[ "articles" ] = [ get_article_vals( a ) for a in articles ]
if add_type:
vals[ "type" ] = "publication"
vals[ "articles" ] = [ get_article_vals( a, False ) for a in articles ]
if deep:
vals[ "_parent_publ" ] = asl_articles.publishers.get_publisher_vals(
pub.parent_publ, False, False
) if pub.parent_publ else None
return vals
def get_publication_sort_key( pub ):
"""Get a publication's sort key."""
# NOTE: This is used to sort publications within their parent publisher.
return int( pub.time_created.timestamp() ) if pub.time_created else 0
# FUDGE! We used to sort by time_created, but later added a seq#, so we now want to
# sort by seq# first, then fall back to time_created.
# NOTE: We assume that the seq# values are all small, and less than any timestamp.
# This means that any seq# value will appear before any timestamp in the sort order.
if pub.pub_seqno:
return pub.pub_seqno
elif pub.time_created:
return int( pub.time_created.timestamp() )
else:
return 0
# ---------------------------------------------------------------------
@@ -85,31 +98,45 @@ def create_publication():
log = ( _logger, "Create publication:" )
)
warnings = []
updated = clean_request_args( vals, _FIELD_NAMES, warnings, _logger )
clean_request_args( vals, _FIELD_NAMES, warnings, _logger )
# NOTE: Tags are stored in the database using \n as a separator, so we need to encode *after* cleaning them.
cleaned_tags = clean_tags( vals.get("pub_tags"), warnings )
vals[ "pub_tags" ] = encode_tags( cleaned_tags )
if cleaned_tags != vals.get( "pub_tags" ):
updated[ "pub_tags" ] = decode_tags( vals["pub_tags"] )
# create the new publication
vals[ "time_created" ] = datetime.datetime.now()
pub = Publication( **vals )
db.session.add( pub )
_save_image( pub, updated )
_set_seqno( pub, pub.publ_id )
_save_image( pub )
db.session.commit()
_logger.debug( "- New ID: %d", pub.pub_id )
search.add_or_update_publication( None, pub )
search.add_or_update_publication( None, pub, None )
# generate the response
extras = { "pub_id": pub.pub_id }
if request.args.get( "list" ):
extras[ "publications" ] = do_get_publications()
extras[ "tags" ] = do_get_tags()
return make_ok_response( updated=updated, extras=extras, warnings=warnings )
def _save_image( pub, updated ):
vals = get_publication_vals( pub, False, True )
return make_ok_response( record=vals, warnings=warnings )
def _set_seqno( pub, publ_id ):
"""Set a publication's seq#."""
if publ_id:
# NOTE: Since we currently don't provide a way to set the seq# in the UI,
# we leave gaps between the seq#s we assign here, so that the user can manually
# insert publications into those gaps at a later time.
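# e.g. successive publications in a publisher get seq# 1, 10, 20, 30, ...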
max_seqno = db.session.query( func.max( Publication.pub_seqno ) ) \
.filter( Publication.publ_id == publ_id ) \
.scalar()
if max_seqno is None:
pub.pub_seqno = 1
elif max_seqno == 1:
pub.pub_seqno = 10
else:
pub.pub_seqno = max_seqno + 10
else:
pub.pub_seqno = None
def _save_image( pub ):
"""Save the publication's image."""
# check if a new image was provided
@@ -121,7 +148,7 @@ def _save_image( pub, updated ):
PublicationImage.query.filter( PublicationImage.pub_id == pub.pub_id ).delete()
if image_data == "{remove}":
# NOTE: The front-end sends this if it wants the publication to have no image.
updated[ "pub_image_id" ] = None
pub.pub_image_id = None
return
# add the new image to the database
@@ -131,7 +158,6 @@ def _save_image( pub, updated ):
db.session.add( img )
db.session.flush()
_logger.debug( "Created new image: %s, #bytes=%d", fname, len(image_data) )
updated[ "pub_image_id" ] = pub.pub_id
# ---------------------------------------------------------------------
@@ -145,22 +171,22 @@ def update_publication():
log = ( _logger, "Update publication: id={}".format( pub_id ) )
)
warnings = []
updated = clean_request_args( vals, _FIELD_NAMES, warnings, _logger )
clean_request_args( vals, _FIELD_NAMES, warnings, _logger )
article_order = request.json.get( "article_order" )
# NOTE: Tags are stored in the database using \n as a separator, so we need to encode *after* cleaning them.
cleaned_tags = clean_tags( vals.get("pub_tags"), warnings )
vals[ "pub_tags" ] = encode_tags( cleaned_tags )
if cleaned_tags != vals.get( "pub_tags" ):
updated[ "pub_tags" ] = decode_tags( vals["pub_tags"] )
# update the publication
pub = Publication.query.get( pub_id )
if not pub:
abort( 404 )
if vals["publ_id"] != pub.publ_id:
_set_seqno( pub, vals["publ_id"] )
vals[ "time_updated" ] = datetime.datetime.now()
apply_attrs( pub, vals )
_save_image( pub, updated )
_save_image( pub )
if article_order:
query = Article.query.filter( Article.pub_id == pub_id )
articles = { int(a.article_id): a for a in query }
@@ -177,14 +203,11 @@
pub_id, ", ".join(str(k) for k in articles)
)
db.session.commit()
search.add_or_update_publication( None, pub )
search.add_or_update_publication( None, pub, None )
# generate the response
extras = {}
if request.args.get( "list" ):
extras[ "publications" ] = do_get_publications()
extras[ "tags" ] = do_get_tags()
return make_ok_response( updated=updated, extras=extras, warnings=warnings )
vals = get_publication_vals( pub, False, True )
return make_ok_response( record=vals, warnings=warnings )
# ---------------------------------------------------------------------
@@ -211,8 +234,5 @@ def delete_publication( pub_id ):
search.delete_articles( deleted_articles )
# generate the response
extras = { "deleteArticles": deleted_articles }
if request.args.get( "list" ):
extras[ "publications" ] = do_get_publications()
extras[ "tags" ] = do_get_tags()
extras = { "deletedArticles": deleted_articles }
return make_ok_response( extras=extras )

@@ -8,7 +8,8 @@ from flask import request, jsonify, abort
from asl_articles import app, db
from asl_articles.models import Publisher, PublisherImage, Publication, Article
from asl_articles.publications import do_get_publications
from asl_articles.publications import get_publication_vals, get_publication_sort_key
from asl_articles.articles import get_article_vals, get_article_sort_key
from asl_articles import search
from asl_articles.utils import get_request_args, clean_request_args, make_ok_response, apply_attrs
@@ -21,14 +22,10 @@ _FIELD_NAMES = [ "*publ_name", "publ_description", "publ_url" ]
@app.route( "/publishers" )
def get_publishers():
"""Get all publishers."""
return jsonify( _do_get_publishers() )
def _do_get_publishers():
"""Get all publishers."""
# NOTE: The front-end maintains a cache of the publishers, so as a convenience,
# we return the current list as part of the response to a create/update/delete operation.
results = Publisher.query.all()
return { r.publ_id: get_publisher_vals(r) for r in results }
return jsonify( {
publ.publ_id: get_publisher_vals( publ, False, False )
for publ in Publisher.query.all()
} )
# ---------------------------------------------------------------------
@@ -40,7 +37,10 @@ def get_publisher( publ_id ):
publ = Publisher.query.get( publ_id )
if not publ:
abort( 404 )
vals = get_publisher_vals( publ )
vals = get_publisher_vals( publ,
request.args.get( "include_pubs" ),
request.args.get( "include_articles" )
)
# include the number of associated publications
query = Publication.query.filter_by( publ_id = publ_id )
vals[ "nPublications" ] = query.count()
@@ -48,21 +48,28 @@ def get_publisher( publ_id ):
query = db.session.query( Article, Publication ) \
.filter( Publication.publ_id == publ_id ) \
.filter( Article.pub_id == Publication.pub_id )
vals[ "nArticles" ] = query.count()
nArticles = query.count()
nArticles2 = Article.query.filter_by( publ_id = publ_id ).count()
vals[ "nArticles" ] = nArticles + nArticles2
_logger.debug( "- %s ; #publications=%d ; #articles=%d", publ, vals["nPublications"], vals["nArticles"] )
return jsonify( vals )
def get_publisher_vals( publ, add_type=False ):
def get_publisher_vals( publ, include_pubs, include_articles ):
"""Extract public fields from a Publisher record."""
vals = {
"_type": "publisher",
"publ_id": publ.publ_id,
"publ_name": publ.publ_name,
"publ_description": publ.publ_description,
"publ_url": publ.publ_url,
"publ_image_id": publ.publ_id if publ.publ_image else None,
}
if add_type:
vals[ "type" ] = "publisher"
if include_pubs:
pubs = sorted( publ.publications, key=get_publication_sort_key )
vals[ "publications" ] = [ get_publication_vals( p, False, False ) for p in pubs ]
if include_articles:
articles = sorted( publ.articles, key=get_article_sort_key )
vals[ "articles" ] = [ get_article_vals( a, False ) for a in articles ]
return vals
# ---------------------------------------------------------------------
@@ -76,24 +83,22 @@ def create_publisher():
log = ( _logger, "Create publisher:" )
)
warnings = []
updated = clean_request_args( vals, _FIELD_NAMES, warnings, _logger )
clean_request_args( vals, _FIELD_NAMES, warnings, _logger )
# create the new publisher
vals[ "time_created" ] = datetime.datetime.now()
publ = Publisher( **vals )
db.session.add( publ )
_save_image( publ, updated )
_save_image( publ )
db.session.commit()
_logger.debug( "- New ID: %d", publ.publ_id )
search.add_or_update_publisher( None, publ )
search.add_or_update_publisher( None, publ, None )
# generate the response
extras = { "publ_id": publ.publ_id }
if request.args.get( "list" ):
extras[ "publishers" ] = _do_get_publishers()
return make_ok_response( updated=updated, extras=extras, warnings=warnings )
vals = get_publisher_vals( publ, True, True )
return make_ok_response( record=vals, warnings=warnings )
def _save_image( publ, updated ):
def _save_image( publ ):
"""Save the publisher's image."""
# check if a new image was provided
@@ -105,7 +110,7 @@ def _save_image( publ, updated ):
PublisherImage.query.filter( PublisherImage.publ_id == publ.publ_id ).delete()
if image_data == "{remove}":
# NOTE: The front-end sends this if it wants the publisher to have no image.
updated[ "publ_image_id" ] = None
publ.publ_image_id = None
return
# add the new image to the database
@@ -115,7 +120,6 @@ def _save_image( publ, updated ):
db.session.add( img )
db.session.flush()
_logger.debug( "Created new image: %s, #bytes=%d", fname, len(image_data) )
updated[ "publ_image_id" ] = publ.publ_id
# ---------------------------------------------------------------------
@@ -129,23 +133,21 @@ def update_publisher():
log = ( _logger, "Update publisher: id={}".format( publ_id ) )
)
warnings = []
updated = clean_request_args( vals, _FIELD_NAMES, warnings, _logger )
clean_request_args( vals, _FIELD_NAMES, warnings, _logger )
# update the publication
# update the publisher
publ = Publisher.query.get( publ_id )
if not publ:
abort( 404 )
_save_image( publ, updated )
_save_image( publ )
vals[ "time_updated" ] = datetime.datetime.now()
apply_attrs( publ, vals )
db.session.commit()
search.add_or_update_publisher( None, publ )
search.add_or_update_publisher( None, publ, None )
# generate the response
extras = {}
if request.args.get( "list" ):
extras[ "publishers" ] = _do_get_publishers()
return make_ok_response( updated=updated, extras=extras, warnings=warnings )
vals = get_publisher_vals( publ, True, True )
return make_ok_response( record=vals, warnings=warnings )
# ---------------------------------------------------------------------
@@ -179,7 +181,4 @@ def delete_publisher( publ_id ):
search.delete_articles( deleted_articles )
extras = { "deletedPublications": deleted_pubs, "deletedArticles": deleted_articles }
if request.args.get( "list" ):
extras[ "publishers" ] = _do_get_publishers()
extras[ "publications" ] = do_get_publications()
return make_ok_response( extras=extras )

@@ -10,16 +10,12 @@ from asl_articles.models import Scenario
@app.route( "/scenarios" )
def get_scenarios():
"""Get all scenarios."""
return jsonify( do_get_scenarios() )
return jsonify( {
scenario.scenario_id: get_scenario_vals( scenario )
for scenario in Scenario.query.all()
} )
def do_get_scenarios():
"""Get all scenarios."""
return {
s.scenario_id: _get_scenario_vals( s )
for s in Scenario.query #pylint: disable=not-an-iterable
}
def _get_scenario_vals( scenario ):
def get_scenario_vals( scenario ):
"""Extract public fields from a scenario record."""
return {
"scenario_id": scenario.scenario_id,

@@ -2,7 +2,6 @@
import os
import sqlite3
import configparser
import itertools
import random
import tempfile
@@ -18,17 +17,20 @@ from asl_articles.models import Publisher, Publication, Article, Author, Scenari
from asl_articles.publishers import get_publisher_vals
from asl_articles.publications import get_publication_vals, get_publication_sort_key
from asl_articles.articles import get_article_vals, get_article_sort_key
from asl_articles.utils import decode_tags, to_bool
from asl_articles.utils import AppConfigParser, decode_tags, to_bool, squash_spaces
_search_index_path = None
_search_aliases = {}
_search_weights = {}
_author_aliases = {}
_logger = logging.getLogger( "search" )
_SQLITE_FTS_SPECIAL_CHARS = "+-#':/.@$"
# NOTE: The column order defined here is important, since we have to access row results by column index.
_SEARCHABLE_COL_NAMES = [ "name", "name2", "description", "authors", "scenarios", "tags" ]
_get_publisher_vals = lambda p: get_publisher_vals( p, True )
_get_publisher_vals = lambda p: get_publisher_vals( p, True, True )
_get_publication_vals = lambda p: get_publication_vals( p, True, True )
_get_article_vals = lambda a: get_article_vals( a, True )
@@ -65,17 +67,17 @@ class SearchDbConn:
# ---------------------------------------------------------------------
def _get_authors( article ):
def _get_authors( article, session ):
"""Return the searchable authors for an article."""
query = db.session.query( Author, ArticleAuthor ) \
query = (session or db.session).query( Author, ArticleAuthor ) \
.filter( ArticleAuthor.article_id == article.article_id ) \
.join( Author, ArticleAuthor.author_id == Author.author_id ) \
.order_by( ArticleAuthor.seq_no )
return "\n".join( a[0].author_name for a in query )
def _get_scenarios( article ):
def _get_scenarios( article, session ):
"""Return the searchable scenarios for an article."""
query = db.session.query( Scenario, ArticleScenario ) \
query = (session or db.session).query( Scenario, ArticleScenario ) \
.filter( ArticleScenario.article_id == article.article_id ) \
.join( Scenario, ArticleScenario.scenario_id == Scenario.scenario_id ) \
.order_by( ArticleScenario.seq_no )
@@ -95,11 +97,12 @@ def _get_tags( tags ):
_FIELD_MAPPINGS = {
"publisher": { "name": "publ_name", "description": "publ_description" },
"publication": { "name": "pub_name", "description": "pub_description",
"tags": lambda pub: _get_tags( pub.pub_tags )
"tags": lambda pub,_: _get_tags( pub.pub_tags )
},
"article": { "name": "article_title", "name2": "article_subtitle", "description": "article_snippet",
"authors": _get_authors, "scenarios": _get_scenarios,
"tags": lambda article: _get_tags( article.article_tags )
"tags": lambda article,_: _get_tags( article.article_tags ),
"rating": "article_rating"
}
}
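How these mappings are consumed (see _do_add_or_update_searchable() later in this diff): a string value is read as an attribute of the object, while a callable is invoked with (obj, session). A standalone sketch, using a hypothetical stand-in object and a simplified tags lambda:
class FakePub:
    # hypothetical stand-in for a Publication row
    pub_name = "ASL Journal"
    pub_tags = "asl\nscenario"
fields = {
    "name": "pub_name",
    "tags": lambda pub, _: pub.pub_tags.replace( "\n", " " ),  # simplified _get_tags()
}
vals = {
    f: getattr( FakePub, fields[f] ) if isinstance( fields[f], str ) else fields[f]( FakePub, None )
    for f in fields
}
assert vals == { "name": "ASL Journal", "tags": "asl scenario" }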
@@ -117,7 +120,7 @@ def search():
def search_publishers():
"""Return all publishers."""
publs = sorted( Publisher.query.all(), key=lambda p: p.publ_name.lower() )
results = [ get_publisher_vals( p, True ) for p in publs ]
results = [ get_publisher_vals( p, True, True ) for p in publs ]
return jsonify( results )
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@@ -128,7 +131,7 @@ def search_publisher( publ_id ):
publ = Publisher.query.get( publ_id )
if not publ:
return jsonify( [] )
results = [ get_publisher_vals( publ, True ) ]
results = [ get_publisher_vals( publ, True, True ) ]
pubs = sorted( publ.publications, key=get_publication_sort_key, reverse=True )
for pub in pubs:
results.append( get_publication_vals( pub, True, True ) )
@@ -145,7 +148,9 @@ def search_publication( pub_id ):
results = [ get_publication_vals( pub, True, True ) ]
articles = sorted( pub.articles, key=get_article_sort_key )
for article in articles:
results.append( get_article_vals( article, True ) )
article = get_article_vals( article, True )
_create_aslrb_links( article )
results.append( article )
return jsonify( results )
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@@ -156,11 +161,13 @@ def search_article( article_id ):
article = Article.query.get( article_id )
if not article:
return jsonify( [] )
results = [ get_article_vals( article, True ) ]
if article.pub_id:
pub = Publication.query.get( article.pub_id )
if pub:
results.append( get_publication_vals( pub, True, True ) )
vals = get_article_vals( article, True )
_create_aslrb_links( vals )
results = [ vals ]
if article.parent_pub:
results.append( get_publication_vals( article.parent_pub, True, True ) )
if article.parent_publ:
results.append( get_publisher_vals( article.parent_publ, True, True ) )
return jsonify( results )
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@@ -168,11 +175,19 @@ def search_article( article_id ):
@app.route( "/search/author/<author_id>", methods=["POST","GET"] )
def search_author( author_id ):
"""Search for an author."""
author = Author.query.get( author_id )
if not author:
try:
author_id = int( author_id )
except ValueError:
return jsonify( [] )
author_ids = _author_aliases.get( author_id, [author_id] )
authors = Author.query.filter( Author.author_id.in_( author_ids ) ).all()
if not authors:
return jsonify( [] )
author_name = '"{}"'.format( author.author_name.replace( '"', '""' ) )
return _do_search( author_name, [ "authors" ] )
author_names = [
'"{}"'.format( a.author_name.replace( '"', '""' ) )
for a in authors
]
return _do_search( " OR ".join(author_names), [ "authors" ] )
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@@ -251,15 +266,22 @@ def _do_fts_search( fts_query_string, col_names, results=None ): #pylint: disabl
# in the same thread they were created in.
with SearchDbConn() as dbconn:
# generate the search weights
weights = []
weights.append( 0.0 ) # nb: this is for the "owner" column
for col_name in _SEARCHABLE_COL_NAMES:
weights.append( _search_weights.get( col_name, 1.0 ) )
# run the search
hilites = [ "", "" ] if no_hilite else [ BEGIN_HILITE, END_HILITE ]
def highlight( n ):
return "highlight( searchable, {}, '{}', '{}' )".format(
n, hilites[0], hilites[1]
)
sql = "SELECT owner, rank, {}, {}, {}, {}, {}, {} FROM searchable" \
sql = "SELECT owner, bm25(searchable,{}) AS rank, {}, {}, {}, {}, {}, {}, rating FROM searchable" \
" WHERE searchable MATCH ?" \
" ORDER BY rank".format(
" ORDER BY rating DESC, rank".format(
",".join( str(w) for w in weights ),
highlight(1), highlight(2), highlight(3), highlight(4), highlight(5), highlight(6)
)
match = "{{ {} }}: {}".format(
@@ -279,11 +301,13 @@ def _do_fts_search( fts_query_string, col_names, results=None ): #pylint: disabl
# prepare the result for the front-end
result = globals()[ "_get_{}_vals".format( owner_type ) ]( obj )
result[ "type" ] = owner_type
result[ "_type" ] = owner_type
result[ "rank" ] = row[1]
# return highlighted versions of the content to the caller
fields = _FIELD_MAPPINGS[ owner_type ]
for col_no,col_name in enumerate(["name","name2","description"]):
assert _SEARCHABLE_COL_NAMES[:3] == [ "name", "name2", "description" ]
for col_no,col_name in enumerate(_SEARCHABLE_COL_NAMES[:3]):
field = fields.get( col_name )
if not field:
continue
@@ -299,6 +323,10 @@ def _do_fts_search( fts_query_string, col_names, results=None ): #pylint: disabl
if row[7] and BEGIN_HILITE in row[7]:
result[ "tags!" ] = row[7].split( "\n" )
# create links to the eASLRB
if owner_type == "article":
_create_aslrb_links( result )
# add the result to the list
results.append( result )
@@ -310,61 +338,241 @@ def _do_fts_search( fts_query_string, col_names, results=None ): #pylint: disabl
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def _make_fts_query_string( query_string, search_aliases ):
def _make_fts_query_string( query_string, search_aliases ): #pylint: disable=too-many-statements,too-many-locals
"""Generate the SQLite query string."""
# check if this looks like a raw FTS query
if any( regex.search(query_string) for regex in _PASSTHROUGH_REGEXES ):
return query_string
# split the query string (taking into account quoted phrases)
words = query_string.split()
i = 0
while True:
if i >= len(words):
break
if i > 0 and words[i-1].startswith('"'):
words[i-1] += " {}".format( words[i] )
del words[i]
if words[i-1].startswith('"') and words[i-1].endswith('"'):
words[i-1] = words[i-1][1:-1]
continue
i += 1
# clean up quoted phrases
words = [ w[1:] if w.startswith('"') else w for w in words ]
words = [ w[:-1] if w.endswith('"') else w for w in words ]
words = [ w.strip() for w in words ]
words = [ w for w in words if w ]
# quote any phrases that need it
def has_special_char( word ):
return any( ch in word for ch in _SQLITE_FTS_SPECIAL_CHARS+" " )
def quote_word( word ):
return '"{}"'.format(word) if has_special_char(word) else word
words = [ quote_word(w) for w in words ]
# handle search aliases
for i,word in enumerate(words):
word = word.lower()
if word.startswith( '"' ) and word.endswith( '"' ):
word = word[1:-1]
aliases = search_aliases.get( word )
if aliases:
aliases = [ quote_word( a ) for a in aliases ]
aliases.sort() # nb: so that tests will work reliably
words[i] = "({})".format(
" OR ".join( aliases )
)
# initialize
query_string = squash_spaces( query_string )
is_raw_query = any( regex.search(query_string) for regex in _PASSTHROUGH_REGEXES )
# set the order in which we will check search aliases (longest to shortest,
# because we want an alias of "aa bb cc" to take priority over "bb").
search_aliases = sorted( search_aliases.items(), key=lambda a: len(a[0]), reverse=True )
def is_word_char( ch ):
return ch.isalnum() or ch in "_-#"
def is_word( start, end ):
"""Check if the string segment starts/ends on a word boundary."""
if start > 0 and is_word_char( buf[start-1] ):
return False
if end < len(buf) and is_word_char( buf[end] ):
return False
return True
# look for search aliases
buf = query_string.lower()
matches = []
for alias in search_aliases:
pos = 0
while True:
# look for the next instance of the alias
start = buf.find( alias[0], pos )
if start < 0:
break
# found one, check if it's a separate word
end = start + len(alias[0])
pos = end
if not is_word( start, end ):
continue
# check if it's quoted
if buf[start-1] == '"' and buf[end] == '"':
# yup - remove the quotes
start -= 1
end += 1
# save the location of the match (and what it will be replaced with)
matches.append( ( start, end, alias[1] ) )
# remove the matching string (for safety, to stop it from being matched again later)
buf = buf[:start] + "#"*len(alias[0]) + buf[end:]
def make_replacement_text( val ):
"""Generate the query sub-clause for alias replacement text."""
if isinstance( val, str ):
return quote( val )
else:
assert isinstance( val, list )
return "({})".format( " OR ".join( quote(v) for v in val ) )
def quote( val ):
"""Quote a string, if necessary."""
# NOTE: We used to check for fully-quoted values i.e.
# not ( startswith " and endswith " )
# which becomes:
# not startswith " or not endswith "
# but this doesn't work with quoted multi-word phrases that contain special characters
# e.g. "J. R. Tracy", since we see that the first phrase ("J.) is not fully-quoted,
# and so we wrap it in quotes :-/ Instead, if we see a quote at either end of the word,
# we treat it as part of a quoted phrase (either single- or multi-word), and use it verbatim.
if not val.startswith( '"' ) and not val.endswith( '"' ):
if any( ch in val for ch in _SQLITE_FTS_SPECIAL_CHARS+" " ):
val = '"{}"'.format( val )
return val.replace( "'", "''" )
def tokenize( val ):
"""Split a string into tokens (taking into account quoted phrases)."""
if is_raw_query:
return [ val.strip() ]
tokens = []
DQUOTE_MARKER = "<!~!>"
for word in val.split():
# FUDGE! It's difficult to figure out if we have a multi-word quoted phrase when the query string
# contains nested quotes, so we hack around this by temporarily removing the inner quotes.
word = word.replace( '""', DQUOTE_MARKER )
if len(tokens) > 0:
if tokens[-1].startswith( '"' ) and not tokens[-1].endswith( '"' ):
# the previous token is the start of a quoted phrase - continue it
tokens[-1] += " " + word
continue
tokens.append( quote( word ) )
if len(tokens) > 0 and tokens[-1].startswith( '"' ) and not tokens[-1].endswith( '"' ):
# we have an unterminated quoted phrase, terminate it
tokens[-1] += '"'
return [
t.replace( DQUOTE_MARKER, '""' )
for t in tokens if t
]
# split the query string into parts (alias replacement texts, and everything else)
parts, pos = [], 0
for match in matches:
if pos < match[0]:
# extract the text up to the start of the next match, and tokenize it
parts.extend( tokenize( query_string[ pos : match[0] ] ) )
# replace the next match with its replacement text
parts.append( make_replacement_text( match[2] ) )
pos = match[1]
if pos < len(query_string):
# extract any remaining text, and tokenize it
parts.extend( tokenize( query_string[pos:] ) )
# clean up the parts
parts = [ p for p in parts if p not in ('"','""') ]
# NOTE: Quoted phrases are not handled properly if alias replacement happens inside them e.g.
# "MMP News" -> (mmp OR "Multi-Man Publishing" OR "Multiman Publishing") AND News
# but it's difficult to know what to do in this case. If we have an alias "foo" => "bar",
# then this search query:
# "foo xyz"
# should really become:
# ("foo xyz" OR "bar xyz")
# but this would be ridiculously complicated to implement, and far more trouble than it's worth.
# We can end up with unmatched quotes in these cases, so we try to clean them up here.
def clean_part( val ):
if len(val) > 1:
if val.startswith( '"' ) and not val.endswith( '"' ):
return val[1:]
if not val.startswith( '"' ) and val.endswith( '"' ):
return val[:-1]
return val
parts = [ clean_part(p) for p in parts ]
return (" " if is_raw_query else " AND ").join( parts )
# escape any special characters
words = [ w.replace("'","''") for w in words ]
# ---------------------------------------------------------------------
# regex's that specify what a ruleid looks like
_RULEID_REGEXES = [
re.compile( r"\b[A-Z]\d{0,3}\.\d{1,5}[A-Za-z]?\b" ),
# nb: while there are ruleid's like "C5", it's far more likely this is referring to a hex :-/
#re.compile( r"\b[A-Z]\d{1,4}[A-Za-z]?\b" ),
]
return " AND ".join( words )
def _create_aslrb_links( article ):
"""Create links to the ASLRB for ruleid's."""
# initialize
base_url = app.config.get( "ASLRB_BASE_URL", os.environ.get("ASLRB_BASE_URL") )
if not base_url:
return
if "article_snippet!" in article:
snippet = article[ "article_snippet!" ]
else:
snippet = article[ "article_snippet" ]
if not snippet:
return
def make_link( startpos, endpos, ruleid, caption ):
nonlocal snippet
if ruleid:
link = "<a href='{}#{}' class='aslrb' target='_blank'>{}</a>".format(
base_url, ruleid, caption
)
snippet = snippet[:startpos] + link + snippet[endpos:]
else:
# NOTE: We can get here when a manually-created link has no ruleid e.g. because the content
# contains something that is incorrectly being detected as a ruleid, and the user has fixed it up.
snippet = snippet[:startpos] + caption + snippet[endpos:]
# find ruleid's in the snippet and replace them with links to the ASLRB
matches = _find_aslrb_ruleids( snippet )
for match in reversed(matches):
startpos, endpos, ruleid, caption = match
make_link( startpos, endpos, ruleid, caption )
article[ "article_snippet!" ] = snippet
def _find_aslrb_ruleids( val ): #pylint: disable=too-many-branches
"""Find ruleid's."""
# locate any manually-created links; format is "{:ruleid|caption:}"
# NOTE: The ruleid is optional, so that if something is incorrectly being detected as a ruleid,
# the user can disable the link by creating one of these with no ruleid.
manual = list( re.finditer( r"{:(.*?)\|(.+?):}", val ) )
def is_manual( target ):
return any(
target.start() >= mo.start() and target.end() <= mo.end()
for mo in manual
)
# look for ruleid's
matches = []
for regex in _RULEID_REGEXES:
for mo in regex.finditer( val ):
if is_manual( mo ):
continue # nb: ignore any ruleid's that are part of a manually-created link
matches.append( mo )
# FUDGE! Remove overlapping matches e.g. if we have "B1.23", we will have matches for "B1" and "B1.23".
matches2, prev_mo = [], None
matches.sort( key=lambda mo: mo.start() )
for mo in matches:
if prev_mo and mo.start() == prev_mo.start() and len(mo.group()) < len(prev_mo.group()):
continue
matches2.append( mo )
prev_mo = mo
# extract the start/end positions of each match, ruleid and caption
matches = [
[ mo.start(), mo.end(), mo.group(), mo.group() ]
for mo in matches2
]
# NOTE: If we have something like "C1.23-.45", we want to link to "C1.23",
# but have the <a> tag wrap the whole thing.
# NOTE: This won't work if the user searched for "C1.23", since it will be wrapped
# in a highlight <span>.
for match in matches:
endpos = match[1]
if endpos == len(val) or val[endpos] != "-":
continue
nchars, allow_dot = 1, True
while endpos + nchars < len(val):
ch = val[ endpos + nchars ]
if ch.isdigit():
nchars += 1
elif ch == "." and allow_dot:
nchars += 1
allow_dot = False
else:
break
if nchars > 1:
match[1] += nchars
match[3] = val[ match[0] : match[1] ]
# add any manually-created links
for mo in manual:
matches.append( [ mo.start(), mo.end(), mo.group(1), mo.group(2) ] )
return sorted( matches, key=lambda m: m[0] )
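What the ruleid regex and the manual-link syntax above actually match, as a standalone sketch (illustrative strings only):
import re
RULEID = re.compile( r"\b[A-Z]\d{0,3}\.\d{1,5}[A-Za-z]?\b" )
assert RULEID.findall( "See A25.23 and C1.23 for details." ) == [ "A25.23", "C1.23" ]
assert RULEID.findall( "move to hex C5" ) == []  # nb: bare "C5" is deliberately not matched
MANUAL = re.compile( r"{:(.*?)\|(.+?):}" )
mo = MANUAL.search( "{:A7.305|assault movement:}" )
assert ( mo.group(1), mo.group(2) ) == ( "A7.305", "assault movement" )
assert MANUAL.search( "{:|not a ruleid:}" ).group( 1 ) == ""  # empty ruleid disables the link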
# ---------------------------------------------------------------------
def init_search( session, logger ):
def init_search( session, logger, test_mode=False ):
"""Initialize the search engine."""
# initialize the database
@@ -385,9 +593,11 @@ def init_search( session, logger ):
# NOTE: We would like to make "owner" the primary key, but FTS doesn't support primary keys
# (nor UNIQUE constraints), so we have to manage this manually :-(
# IMPORTANT: The column order is important here, since we use the column index to generate
# the bm25() clause when doing searches.
dbconn.conn.execute(
"CREATE VIRTUAL TABLE searchable USING fts5"
" ( owner, {}, tokenize='porter unicode61' )".format(
" ( owner, {}, rating, tokenize='porter unicode61' )".format(
", ".join( _SEARCHABLE_COL_NAMES )
)
)
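A minimal standalone sketch of the weighted ranking described above, against an in-memory table with the same column layout (assumes a SQLite build with FTS5 enabled):
import sqlite3
conn = sqlite3.connect( ":memory:" )
conn.execute(
    "CREATE VIRTUAL TABLE searchable"
    " ( owner, name, name2, description, authors, scenarios, tags, rating,"
    " tokenize='porter unicode61' )"
)
conn.execute(
    "INSERT INTO searchable ( owner, name, rating ) VALUES ( 'article:1', 'Hill 621', 3 )"
)
weights = [ 0.0 ] + [ 1.0 ] * 6  # nb: the "owner" column is never weighted
sql = "SELECT owner, bm25(searchable,{}) AS rank FROM searchable" \
    " WHERE searchable MATCH ? ORDER BY rating DESC, rank".format(
        ",".join( str(w) for w in weights )
    )
rows = conn.execute( sql, ( "hill", ) ).fetchall()
assert rows[0][0] == "article:1"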
@@ -398,26 +608,66 @@ def init_search( session, logger ):
logger.debug( "Loading the search index..." )
logger.debug( "- Loading publishers." )
for publ in session.query( Publisher ).order_by( Publisher.time_created.desc() ):
add_or_update_publisher( dbconn, publ )
add_or_update_publisher( dbconn, publ, session )
logger.debug( "- Loading publications." )
for pub in session.query( Publication ).order_by( Publication.time_created.desc() ):
add_or_update_publication( dbconn, pub )
add_or_update_publication( dbconn, pub, session )
logger.debug( "- Loading articles." )
for article in session.query( Article ).order_by( Article.time_created.desc() ):
add_or_update_article( dbconn, article )
add_or_update_article( dbconn, article, session )
# load the search aliases
cfg = configparser.ConfigParser()
fname = os.path.join( asl_articles.config_dir, "app.cfg" )
_logger.debug( "Loading search aliases: %s", fname )
cfg.read( fname )
# configure the search engine
global _search_aliases
def get_section( section_name ):
try:
return cfg.items( section_name )
except configparser.NoSectionError:
return []
_search_aliases = _load_search_aliases( get_section("Search aliases"), get_section("Search aliases 2") )
_search_aliases = {}
global _search_weights
_search_weights = {}
fname = os.path.join( asl_articles.config_dir, "search.cfg" )
if os.path.isfile( fname ):
# load the search aliases
_logger.debug( "Loading search aliases: %s", fname )
cfg = AppConfigParser( fname )
_search_aliases = _load_search_aliases(
cfg.get_section( "Search aliases" ),
cfg.get_section( "Search aliases 2" )
)
# load the search weights
_logger.debug( "Loading search weights:" )
for row in cfg.get_section( "Search weights" ):
if row[0] not in _SEARCHABLE_COL_NAMES:
asl_articles.startup.log_startup_msg( "warning",
"Unknown search weight field: {}", row[0],
logger = _logger
)
continue
try:
_search_weights[ row[0] ] = float( row[1] )
_logger.debug( "- %s = %s", row[0], row[1] )
except ValueError:
asl_articles.startup.log_startup_msg( "warning",
"Invalid search weight for \"{}\": {}", row[0], row[1],
logger = _logger
)
# load the author aliases
# NOTE: These should really be stored in the database, but the UI to manage them
# would be insanely hairy, so we just keep them in a text file and let the user
# maintain them manually :-/
global _author_aliases
_author_aliases = {}
fname = os.path.join( asl_articles.config_dir, "author-aliases.cfg" )
if os.path.isfile( fname ):
_logger.debug( "Loading author aliases: %s", fname )
cfg = AppConfigParser( fname )
_author_aliases = _load_author_aliases( cfg.get_section("Author aliases"), session, False )
if test_mode:
# NOTE: We load the test aliases here as well (since the test suite can't mock them,
# because we might be running in a different process).
fname = os.path.join( os.path.split(__file__)[0], "tests/fixtures/author-aliases.cfg" )
if os.path.isfile( fname ):
_logger.debug( "Loading test author aliases: %s", fname )
cfg = AppConfigParser( fname )
_author_aliases.update(
_load_author_aliases( cfg.get_section("Author aliases"), session, True )
)
def _load_search_aliases( aliases, aliases2 ):
"""Load the search aliases."""
@@ -427,53 +677,94 @@ def _load_search_aliases( aliases, aliases2 ):
def add_search_alias( key, vals ):
if key in search_aliases:
_logger.warning( "Found duplicate search alias: %s", key )
search_aliases[ key ] =vals
asl_articles.startup.log_startup_msg( "warning",
"Found duplicate search alias: {}", key,
logger = _logger
)
search_aliases[ key.lower() ] = vals
# load the search aliases
for row in aliases:
vals = [ row[0] ]
vals.extend( v.strip() for v in row[1].split( ";" ) )
add_search_alias( row[0], vals )
vals.extend( v for v in row[1].split( ";" ) )
vals = [ squash_spaces(v) for v in vals ]
add_search_alias( vals[0], vals )
_logger.debug( "- %s => %s", row[0], vals )
# load the search aliases
for row in aliases2:
vals = itertools.chain( [row[0]], row[1].split("=") )
vals = [ v.strip().lower() for v in vals ]
vals = [ squash_spaces(v) for v in vals ]
_logger.debug( "- %s", vals )
for v in vals:
add_search_alias( v, vals )
return search_aliases
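A standalone sketch of the two alias formats being loaded above, with hypothetical config entries (the real loader also lower-cases keys and squashes spaces):
aliases = {}
# [Search aliases] mmp = Multi-Man Publishing ; Multiman Publishing  (one-way)
key, val = "mmp", "Multi-Man Publishing ; Multiman Publishing"
aliases[ key ] = [ key ] + [ v.strip() for v in val.split( ";" ) ]
# [Search aliases 2] atf = advancing fire  (two-way: every name maps to the full set)
names = [ "atf", "advancing fire" ]
for name in names:
    aliases[ name ] = names
assert aliases[ "mmp" ] == [ "mmp", "Multi-Man Publishing", "Multiman Publishing" ]
assert aliases[ "advancing fire" ] == [ "atf", "advancing fire" ]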
def _load_author_aliases( aliases, session, silent ):
"""Load the author aliases."""
# initialize
if not session:
session = db.session
# load the author aliases
author_aliases = {}
for row in aliases:
vals = itertools.chain( [row[0]], row[1].split("=") )
vals = [ v.strip() for v in vals ]
authors = []
for author_name in vals:
author = session.query( Author ).filter(
Author.author_name == author_name
).one_or_none()
if author:
authors.append( author )
else:
if not silent:
asl_articles.startup.log_startup_msg( "warning",
"Unknown author for alias: {}", author_name,
logger = _logger
)
if len(authors) <= 1:
continue
_logger.debug( "- %s", " ; ".join( str(a) for a in authors ) )
authors = [ a.author_id for a in authors ]
for author_id in authors:
author_aliases[ author_id ] = authors
return author_aliases
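The resulting map lets a search for any aliased author find all of them. A sketch, using the author id's from the test fixture later in this diff:
ids = [ 3, 1, 5 ]  # Chuck Jones, Charles M. Jones, Charles Martin Jones
author_aliases = { author_id: ids for author_id in ids }
assert author_aliases[ 1 ] == [ 3, 1, 5 ]  # searching for any one of them finds all three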
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def add_or_update_publisher( dbconn, publ ):
def add_or_update_publisher( dbconn, publ, session ):
"""Add/update a publisher in the search index."""
_do_add_or_update_searchable( dbconn, "publisher",
_make_publisher_key(publ), publ
_make_publisher_key(publ), publ,
session
)
def add_or_update_publication( dbconn, pub ):
def add_or_update_publication( dbconn, pub, session ):
"""Add/update a publication in the search index."""
_do_add_or_update_searchable( dbconn, "publication",
_make_publication_key(pub.pub_id), pub
_make_publication_key(pub.pub_id), pub,
session
)
def add_or_update_article( dbconn, article ):
def add_or_update_article( dbconn, article, session ):
"""Add/update an article in the search index."""
_do_add_or_update_searchable( dbconn, "article",
_make_article_key(article.article_id), article
_make_article_key(article.article_id), article,
session
)
def _do_add_or_update_searchable( dbconn, owner_type, owner, obj ):
def _do_add_or_update_searchable( dbconn, owner_type, owner, obj, session ):
"""Add or update a record in the search index."""
# prepare the fields
fields = _FIELD_MAPPINGS[ owner_type ]
vals = {
f: getattr( obj,fields[f] ) if isinstance( fields[f], str ) else fields[f]( obj )
f: getattr( obj, fields[f] ) if isinstance( fields[f], str ) else fields[f]( obj, session )
for f in fields
}
# NOTE: We used to strip HTML here, but we prefer to see formatted content
@@ -481,14 +772,15 @@ def _do_add_or_update_searchable( dbconn, owner_type, owner, obj ):
def do_add_or_update( dbconn ):
sql = "INSERT INTO searchable" \
" ( owner, {} )" \
" VALUES (?,?,?,?,?,?,?)".format(
" ( owner, {}, rating )" \
" VALUES (?,?,?,?,?,?,?,?)".format(
",".join( _SEARCHABLE_COL_NAMES )
)
dbconn.conn.execute( sql, (
owner,
vals.get("name"), vals.get("name2"), vals.get("description"),
vals.get("authors"), vals.get("scenarios"), vals.get("tags")
vals.get("authors"), vals.get("scenarios"), vals.get("tags"),
vals.get("rating")
) )
# update the database
@@ -543,3 +835,11 @@ def _make_publication_key( pub ):
def _make_article_key( article ):
"""Generate the owner key for an Article."""
return "article:{}".format( article.article_id if isinstance(article,Article) else article )
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@app.route( "/init-search-for-test" )
def init_search_for_test():
"""Re-initialize the search engine (for testing porpoises)."""
init_search( db.session, logging.getLogger("search"), test_mode=True )
return "ok"

@@ -19,4 +19,9 @@ def get_startup_msgs():
def log_startup_msg( msg_type, msg, *args, **kwargs ):
"""Log a startup message."""
_startup_msgs[ msg_type ].append( msg.format( *args, **kwargs ) )
logger = kwargs.pop( "logger", None )
msg = msg.format( *args, **kwargs )
if logger:
assert msg_type in ("info","warning","error")
getattr( logger, msg_type )( "%s", msg )
_startup_msgs[ msg_type ].append( msg )

@@ -13,12 +13,7 @@ from asl_articles.utils import decode_tags
@app.route( "/tags" )
def get_tags():
"""Get all tags."""
return jsonify( do_get_tags() )
def do_get_tags():
"""Get all tags."""
# get all the tags
# NOTE: This is pretty inefficient, since an article/publication's tags are munged into one big string
# and stored in a single column, so we need to manually unpack everything, but we'll see how it goes...
tags = defaultdict( int )
@@ -36,4 +31,4 @@ def do_get_tags():
key = lambda v: ( -v[1], v[0] ) # sort by # instances, then name
)
return tags
return jsonify( tags )
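A standalone sketch of the manual unpacking described in the NOTE above (hypothetical column values):
from collections import defaultdict
rows = [ "asl\nscenario", "asl", None ]  # pub_tags / article_tags column values
tags = defaultdict( int )
for row in rows:
    for tag in ( row or "" ).split( "\n" ):
        if tag:
            tags[ tag ] += 1
assert dict( tags ) == { "asl": 2, "scenario": 1 }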

@@ -0,0 +1,3 @@
""" Module definitions. """
pytest_options = None

@@ -0,0 +1,3 @@
[Author aliases]
Chuck Jones = Charles M. Jones = Charles Martin Jones
Joe Blow = Joseph Blow

@@ -0,0 +1,31 @@
{
"article": [
{ "article_id": 101, "article_title": "By Charles M. Jones" },
{ "article_id": 102, "article_title": "By Chuck Jones" },
{ "article_id": 103, "article_title": "By Charles Martin Jones" },
{ "article_id": 104, "article_title": "By Joseph Blow" },
{ "article_id": 105, "article_title": "By Joe Blow" },
{ "article_id": 106, "article_title": "By John Doe" }
],
"author": [
{ "author_id": 1, "author_name": "Charles M. Jones" },
{ "author_id": 2, "author_name": "Joseph Blow" },
{ "author_id": 3, "author_name": "Chuck Jones" },
{ "author_id": 4, "author_name": "Joe Blow" },
{ "author_id": 5, "author_name": "Charles Martin Jones" },
{ "author_id": 6, "author_name": "John Doe" }
],
"article_author": [
{ "seq_no": 1, "article_id": 101, "author_id": 1 },
{ "seq_no": 1, "article_id": 102, "author_id": 3 },
{ "seq_no": 1, "article_id": 103, "author_id": 5 },
{ "seq_no": 1, "article_id": 104, "author_id": 2 },
{ "seq_no": 1, "article_id": 105, "author_id": 4 },
{ "seq_no": 1, "article_id": 106, "author_id": 6 }
]
}

@@ -0,0 +1,42 @@
{
"publisher": [
{ "publ_id": 1, "publ_name": "Avalon Hill", "publ_url": "http://{FLASK}/ping" },
{ "publ_id": 2, "publ_name": "Multiman Publishing", "publ_url": "http://{FLASK}/unknown" }
],
"publication": [
{ "pub_id": 10, "pub_name": "ASL Journal", "pub_edition": "1", "publ_id": 1, "pub_url": "/aslj-1.html" },
{ "pub_id": 11, "pub_name": "ASL Journal", "pub_edition": "2", "publ_id": 1, "pub_url": "/aslj-2.html" },
{ "pub_id": 20, "pub_name": "MMP News", "publ_id": 2 }
],
"article": [
{ "article_id": 100, "article_title": "ASLJ article 1", "pub_id": 10 },
{ "article_id": 101, "article_title": "ASLJ article 2", "pub_id": 10 },
{ "article_id": 110, "article_title": "ASLJ article 3", "pub_id": 11 },
{ "article_id": 200, "article_title": "MMP article", "pub_id": 20, "article_url": "/mmp.html" },
{ "article_id": 299, "article_title": "MMP publisher article", "publ_id": 2, "article_url": "/unknown" }
],
"article_author": [
{ "seq_no": 1, "article_id": 100, "author_id": 1000 },
{ "seq_no": 2, "article_id": 100, "author_id": 1001 },
{ "seq_no": 1, "article_id": 299, "author_id": 1000 }
],
"author": [
{ "author_id": 1000, "author_name": "Joe Blow" },
{ "author_id": 1001, "author_name": "Fred Nerk" },
{ "author_id": 1999, "author_name": "Alan Smithee" }
],
"article_scenario": [
{ "seq_no": 1, "article_id": 100, "scenario_id": 2000 },
{ "seq_no": 1, "article_id": 299, "scenario_id": 2001 }
],
"scenario": [
{ "scenario_id": 2000, "scenario_display_id": "ASL 1", "scenario_name": "The Guards Counterattack" },
{ "scenario_id": 2001, "scenario_display_id": "ASL 5", "scenario_name": "Hill 621" }
]
}

@@ -0,0 +1 @@
Multiman Publishing.

@@ -0,0 +1,11 @@
{
"publisher": [
{ "publ_id": 1, "publ_name": "Avalon Hill" }
],
"publication": [
{ "pub_id": 20, "pub_name": "ASL Journal", "publ_id": 1 }
]
}

@@ -0,0 +1,17 @@
{
"publisher": [
{ "publ_id": 1, "publ_name": "Avalon Hill" },
{ "publ_id": 2, "publ_name": "Multiman Publishing" },
{ "publ_id": 3, "publ_name": "Le Franc Tireur" }
],
"publication": [
{ "pub_id": 20, "pub_name": "MMP News", "publ_id": 2 }
],
"article": [
{ "article_id": 200, "article_title": "MMP Today", "pub_id": 20 }
]
}

@@ -5,6 +5,7 @@ import urllib.request
import urllib.error
import json
import base64
import re
from asl_articles.search import SEARCH_ALL_ARTICLES
from asl_articles.tests.utils import init_tests, select_main_menu_option, select_sr_menu_option, \
@@ -277,8 +278,9 @@ def test_images( webdriver, flask_app, dbconn ): #pylint: disable=too-many-state
btn = find_child( ".row.image .remove-image", dlg )
assert btn.is_displayed()
# make sure the article's image is correct
resp = urllib.request.urlopen( image_url ).read()
assert resp == open( expected, "rb" ).read()
with urllib.request.urlopen( image_url ) as resp:
with open( expected, "rb" ) as fp:
assert resp.read() == fp.read()
else:
# make sure there is no image
img = find_child( ".row.image img.image", dlg )
@@ -289,7 +291,8 @@ def test_images( webdriver, flask_app, dbconn ): #pylint: disable=too-many-state
# make sure the article's image is not available
url = flask_app.url_for( "get_image", image_type="article", image_id=article_id )
try:
resp = urllib.request.urlopen( url )
with urllib.request.urlopen( url ):
pass
assert False, "Should never get here!"
except urllib.error.HTTPError as ex:
assert ex.code == 404
@@ -349,7 +352,8 @@ def test_parent_publisher( webdriver, flask_app, dbconn ):
# check that the parent publication was updated in the database
article_id = sr.get_attribute( "testing--article_id" )
url = flask_app.url_for( "get_article", article_id=article_id )
article = json.load( urllib.request.urlopen( url ) )
with urllib.request.urlopen( url ) as resp:
article = json.load( resp )
if expected_parent:
if article["pub_id"] != expected_parent[0]:
return None
@@ -387,6 +391,157 @@ def test_parent_publisher( webdriver, flask_app, dbconn ):
# ---------------------------------------------------------------------
def test_publisher_articles( webdriver, flask_app, dbconn ): #pylint: disable=too-many-statements
"""Test articles that are associated with a publisher (not publication)."""
# initialize
init_tests( webdriver, flask_app, dbconn, fixtures="publisher-articles.json" )
def check_parent_in_sr( sr, pub, publ ):
"""Check the article's parent publication/publisher in a search result."""
if pub:
elem = wait_for( 2, lambda: find_child( ".header a.publication", sr ) )
assert elem.is_displayed()
assert elem.text == pub
assert re.search( r"^http://.+?/publication/\d+", elem.get_attribute( "href" ) )
elif publ:
elem = wait_for( 2, lambda: find_child( ".header a.publisher", sr ) )
assert elem.is_displayed()
assert elem.text == publ
assert re.search( r"^http://.+?/publisher/\d+", elem.get_attribute( "href" ) )
else:
assert False, "At least one publication/publisher must be specified."
def check_parent_in_dlg( dlg, pub, publ ):
"""Check the article's parent publication/publication in the edit dialog."""
if pub:
select = find_child( ".row.publication .react-select", dlg )
assert select.is_displayed()
assert select.text == pub
elif publ:
select = find_child( ".row.publisher .react-select", dlg )
assert select.is_displayed()
assert select.text == publ
else:
assert False, "At least one publication/publisher must be specified."
# create an article associated with LFT
create_article( {
"title": "test article",
"publisher": "Le Franc Tireur"
} )
results = wait_for( 2, get_search_results )
assert len(results) == 1
sr = results[0]
check_parent_in_sr( sr, None, "Le Franc Tireur" )
# open the article's dialog
select_sr_menu_option( sr, "edit" )
dlg = wait_for_elem( 2, "#article-form" )
check_parent_in_dlg( dlg, None, "Le Franc Tireur" )
# change the article to be associated with an MMP publication
find_child( ".row.publisher label.parent-mode" ).click()
select = wait_for_elem( 2, ".row.publication .react-select" )
ReactSelect( select ).select_by_name( "MMP News" )
find_child( "button.ok", dlg ).click()
results = wait_for( 2, get_search_results )
assert len(results) == 1
sr = results[0]
check_parent_in_sr( sr, "MMP News", None )
# open the article's dialog
select_sr_menu_option( sr, "edit" )
dlg = wait_for_elem( 2, "#article-form" )
check_parent_in_dlg( dlg, "MMP News", None )
# change the article to be associated with MMP (publisher)
find_child( ".row.publication label.parent-mode" ).click()
select = wait_for_elem( 2, ".row.publisher .react-select" )
ReactSelect( select ).select_by_name( "Multiman Publishing" )
find_child( "button.ok", dlg ).click()
results = wait_for( 2, get_search_results )
assert len(results) == 1
sr = results[0]
check_parent_in_sr( sr, None, "Multiman Publishing" )
# show the MMP publisher
results = do_search( "multiman" )
assert len(results) == 1
sr = results[0]
collapsibles = find_children( ".collapsible", sr )
assert len(collapsibles) == 2
items = find_children( "li a", collapsibles[1] )
assert len(items) == 1
item = items[0]
assert item.text == "test article"
assert re.search( r"^http://.+?/article/\d+", item.get_attribute( "href" ) )
# delete the MMP publisher
# NOTE: There are 2 MMP articles, the one that is in the "MMP News" publication,
# and the test article we created above that is associated with the publisher.
select_sr_menu_option( sr, "delete" )
check_ask_dialog( ( "Delete this publisher?", "2 articles will also be deleted" ), "ok" )
query = dbconn.execute( "SELECT count(*) FROM article" )
assert query.scalar() == 0
# ---------------------------------------------------------------------
def test_publisher_article_dates( webdriver, flask_app, dbconn ):
"""Test "published" dates for publisher articles."""
# initialize
init_tests( webdriver, flask_app, dbconn, disable_constraints=False, fixtures="publisher-article-dates.json" )
# initialize
article_title, article_date = "test article", "1st January, 2000"
article_sr = None
def check_article_date( has_date ):
# check the article's publication date
def do_check():
elem = find_child( ".article_date", article_sr )
article_id = article_sr.get_attribute( "testing--article_id" )
row = get_article_row( dbconn, article_id, ["article_date"] )
if has_date:
return elem.text == article_date and row[0] == article_date
else:
return not elem and not row[0]
wait_for( 2, do_check )
# create an article associated with a publication
create_article( {
"title": article_title,
"publication": "ASL Journal",
"snippet": "This is a test article.",
"pageno": 42,
"authors": [ "+Joe Blow" ]
} )
article_sr = wait_for( 2, lambda: find_search_result( article_title ) )
check_article_date( False )
# change the article to be associated with a publisher
edit_article( article_sr, {
"publisher": "Avalon Hill"
}, expected_constraints = [
"The article date was not specified."
], accept_constraints=True )
check_article_date( False )
# give the article a published date
edit_article( article_sr, {
"article_date": article_date
} )
check_article_date( True )
# change the article back to the publication
edit_article( article_sr, {
"publication": "ASL Journal"
} )
check_article_date( False )
# ---------------------------------------------------------------------
def test_unicode( webdriver, flask_app, dbconn ):
"""Test Unicode content."""
@@ -429,7 +584,7 @@ def test_clean_html( webdriver, flask_app, dbconn ):
# create a article with HTML content
create_article( {
"title": "title: <span style='boo!'> <b>bold</b> <xxx>xxx</xxx> <i>italic</i> {}".format( replace[0] ),
"title": "title: <span onclick='boo!'> <b>bold</b> <xxx>xxx</xxx> <i>italic</i> {}".format( replace[0] ),
"subtitle": "<i>italicized subtitle</i> {}".format( replace[0] ),
"snippet": "bad stuff here: <script>HCF</script> {}".format( replace[0] )
}, toast_type="warning" )
@@ -449,7 +604,7 @@ def test_clean_html( webdriver, flask_app, dbconn ):
# update the article with new HTML content
edit_article( sr, {
"title": "<div style='...'>updated</div>"
"title": "<div onclick='...'>updated</div>"
}, toast_type="warning" )
wait_for( 2, lambda: get_search_result_names() == ["updated"] )
assert check_toast( "warning", "Some values had HTML cleaned up.", contains=True )
@@ -484,7 +639,65 @@ def test_timestamps( webdriver, flask_app, dbconn ):
# ---------------------------------------------------------------------
def create_article( vals, toast_type="info", expected_error=None, expected_constraints=None, dlg=None ):
def test_article_ratings( webdriver, flask_app, dbconn ):
"""Test article ratings."""
# initialize
init_tests( webdriver, flask_app, dbconn, fixtures="articles.json" )
def do_test( article_sr, star_no, expected ):
# click the specified article star
stars = find_children( ".rating-stars img", article_sr )
stars[ star_no ].click()
for sr_no,sr in enumerate(results):
assert get_rating(sr) == expected[sr_no]
# compare the ratings on-screen with what's in the database
for sr in results:
article_id = sr.get_attribute( "testing--article_id" )
ui_rating = get_rating( sr )
db_rating = dbconn.execute(
"SELECT article_rating FROM article WHERE article_id={}".format( article_id )
).scalar()
if db_rating is None:
assert ui_rating == 0
else:
assert ui_rating == db_rating
def get_rating( article_sr ):
stars = [
"disabled" not in star.get_attribute("src")
for star in find_children( ".rating-stars img", article_sr )
]
rating = 0
for star in stars:
if not star:
assert all( not s for s in stars[rating+1:] )
break
rating += 1
return rating
# get the test articles
results = do_search( SEARCH_ALL_ARTICLES )
# do the tests
do_test( results[0], 2, [3,0] )
do_test( results[1], 1, [3,2] )
# do the tests
do_test( results[0], 2, [2,2] )
do_test( results[0], 2, [3,2] )
do_test( results[0], 0, [1,2] )
do_test( results[0], 0, [0,2] )
do_test( results[0], 0, [1,2] )
# ---------------------------------------------------------------------
def create_article( vals, toast_type="info",
expected_error=None, expected_constraints=None, accept_constraints=False,
dlg=None
):
"""Create a new article."""
# initialize
@@ -504,7 +717,9 @@ def create_article( vals, toast_type="info", expected_error=None, expected_const
return dlg # nb: the dialog is left on-screen
elif expected_constraints:
# we were expecting constraint warnings, confirm them
check_constraint_warnings( "Do you want to create this article?", expected_constraints, "cancel" )
check_constraint_warnings( "Do you want to create this article?",
expected_constraints, "ok" if accept_constraints else "cancel"
)
return dlg # nb: the dialog is left on-screen
else:
# we were expecting the create to work, confirm this
@@ -516,7 +731,9 @@ def create_article( vals, toast_type="info", expected_error=None, expected_const
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def edit_article( sr, vals, toast_type="info", expected_error=None, expected_constraints=None ): #pylint: disable=too-many-branches
def edit_article( sr, vals, toast_type="info",
expected_error=None, expected_constraints=None, accept_constraints=False
): #pylint: disable=too-many-branches
"""Edit a article's details."""
# initialize
@@ -538,7 +755,9 @@ def edit_article( sr, vals, toast_type="info", expected_error=None, expected_con
return dlg # nb: the dialog is left on-screen
elif expected_constraints:
# we were expecting constraint warnings, confirm them
check_constraint_warnings( "Do you want to update this article?", expected_constraints, "cancel" )
check_constraint_warnings( "Do you want to update this article?",
expected_constraints, "ok" if accept_constraints else "cancel"
)
return dlg # nb: the dialog is left on-screen
else:
# we were expecting the update to work, confirm this
@@ -557,8 +776,14 @@ def _update_values( dlg, vals ):
change_image( dlg, val )
else:
remove_image( dlg )
elif key == "publication":
select = ReactSelect( find_child( ".row.publication .react-select", dlg ) )
elif key in ("publication", "publisher"):
row = find_child( ".row.{}".format( key ), dlg )
select = ReactSelect( find_child( ".react-select", row ) )
if not select.select.is_displayed():
key2 = "publisher" if key == "publication" else "publication"
row2 = find_child( ".row.{}".format( key2 ), dlg )
find_child( "label.parent-mode", row2 ).click()
wait_for( 2, select.select.is_displayed )
select.select_by_name( val )
elif key in ["authors","scenarios","tags"]:
select = ReactSelect( find_child( ".row.{} .react-select".format(key), dlg ) )

@@ -92,5 +92,6 @@ def _check_authors( flask_app, all_authors, expected ):
# check the authors in the database
url = flask_app.url_for( "get_authors" )
authors = json.load( urllib.request.urlopen( url ) )
with urllib.request.urlopen( url ) as resp:
authors = json.load( resp )
assert set( a["author_name"] for a in authors.values() ) == all_authors

@@ -0,0 +1,235 @@
""" Test the database reports. """
import os
import itertools
import re
import pytest
from asl_articles.search import SEARCH_ALL
from asl_articles.tests.test_publishers import edit_publisher
from asl_articles.tests.test_publications import edit_publication
from asl_articles.tests.test_articles import edit_article
from asl_articles.tests.utils import init_tests, \
select_main_menu_option, select_sr_menu_option, check_ask_dialog, \
do_search, find_search_result, get_search_results, \
wait_for, wait_for_elem, find_child, find_children
from asl_articles.tests import pytest_options
# ---------------------------------------------------------------------
def test_db_report( webdriver, flask_app, dbconn ):
"""Test the database report."""
# initialize
init_tests( webdriver, flask_app, dbconn, fixtures="db-report.json" )
# check the initial report
row_counts, links, dupe_images, image_sizes = _get_db_report()
assert row_counts == {
"publishers": 2, "publications": 3, "articles": 5,
"authors": 3, "scenarios": 2
}
assert links == {
"publishers": [ 2, [] ],
"publications": [ 2, [] ],
"articles": [ 2, [] ],
}
assert dupe_images == []
assert image_sizes == {}
# add some images
do_search( SEARCH_ALL )
publ_sr = find_search_result( "Avalon Hill", wait=2 )
fname = os.path.join( os.path.split(__file__)[0], "fixtures/images/1.gif" )
edit_publisher( publ_sr, { "image": fname } )
results = get_search_results()
pub_sr = find_search_result( "ASL Journal (1)", results )
fname = os.path.join( os.path.split(__file__)[0], "fixtures/images/2.gif" )
edit_publication( pub_sr, { "image": fname } )
article_sr = find_search_result( "ASLJ article 1", results )
fname = os.path.join( os.path.split(__file__)[0], "fixtures/images/3.gif" )
edit_article( article_sr, { "image": fname } )
article_sr = find_search_result( "ASLJ article 2", results )
fname = os.path.join( os.path.split(__file__)[0], "fixtures/images/3.gif" )
edit_article( article_sr, { "image": fname } )
# check the updated report
row_counts, _, dupe_images, image_sizes = _get_db_report()
assert row_counts == {
"publishers": 2, "publisher_images": 1,
"publications": 3, "publication_images": 1,
"articles": 5, "article_images": 2,
"authors": 3, "scenarios": 2
}
assert dupe_images == [
[ "f0457ea742376e76ff276ce62c7a8540", "/images/article/100",
( "ASLJ article 1", "/article/100" ),
( "ASLJ article 2", "/article/101" ),
]
]
assert image_sizes == {
"publishers": [
( "Avalon Hill", "/publisher/1", "/images/publisher/1" ),
],
"publications": [
( "ASL Journal (1)", "/publication/10", "/images/publication/10" ),
],
"articles": [
( "ASLJ article 1", "/article/100", "/images/article/100" ),
( "ASLJ article 2", "/article/101", "/images/article/101" ),
]
}
# delete all the publishers (and associated objects), then check the updated report
do_search( SEARCH_ALL )
publ_sr = find_search_result( "Avalon Hill", wait=2 )
select_sr_menu_option( publ_sr, "delete" )
check_ask_dialog( "Delete this publisher?", "ok" )
results = get_search_results()
publ_sr = find_search_result( "Multiman Publishing", results )
select_sr_menu_option( publ_sr, "delete" )
check_ask_dialog( "Delete this publisher?", "ok" )
row_counts, links, dupe_images, image_sizes = _get_db_report()
assert row_counts == {
"publishers": 0, "publications": 0, "articles": 0,
"authors": 3, "scenarios": 2
}
assert links == {
"publishers": [ 0, [] ],
"publications": [ 0, [] ],
"articles": [ 0, [] ],
}
assert dupe_images == []
assert image_sizes == {}
# ---------------------------------------------------------------------
# NOTE: This test may not work if we are running against Docker containers, because:
# - external URL's are created that point to the back-end's $/ping endpoint.
# - the front-end container realizes that these URL's need to be checked by the backend,
# so it sends them to the $/db-report/check-link endpoint.
# - these URL's may not resolve because they were generated using gAppRef.makeFlaskUrl(),
# which will work if the front-end container is sending a request to the back-end
# container, but may not work from inside the back-end container, because the port number
# being used by Flask *inside* the container may not be the same as *outside* the container.
# The problem is generating an external URL that will work everywhere.
# We could specify it as a parameter, but that's more trouble than it's worth.
@pytest.mark.skipif( pytest_options.flask_url is not None, reason="Testing against a remote Flask server." )
def test_check_db_links( webdriver, flask_app, dbconn ):
"""Test checking links in the database."""
# initialize
init_tests( webdriver, flask_app, dbconn, docs="docs/", fixtures="db-report.json" )
# check the initial report
_, links, _, _ = _get_db_report()
assert links == {
"publishers": [ 2, [] ],
"publications": [ 2, [] ],
"articles": [ 2, [] ],
}
# check the links
btn = find_child( "#db-report button.check-links" )
btn.click()
status = find_child( "#db-report .db-links .status-msg" )
wait_for( 10, lambda: status.text == "Checked 6 links." )
# check the updated report
_, links, _, _ = _get_db_report()
assert links == {
"publishers": [ 2, [
( "Multiman Publishing", "/publisher/2", "HTTP 404: http://{FLASK}/unknown" )
] ],
"publications": [ 2, [] ],
"articles": [ 2, [
( "MMP publisher article", "/article/299", "HTTP 404: /unknown" )
] ],
}
# ---------------------------------------------------------------------
def _get_db_report(): #pylint: disable=too-many-locals
"""Generate the database report."""
# generate the report
select_main_menu_option( "db-report" )
wait_for_elem( 2, "#db-report .db-images" )
# unload the row counts
row_counts = {}
table = find_child( "#db-report .db-row-counts" )
for row in find_children( "tr", table ):
cells = find_children( "td", row )
mo = re.search( r"^(\d+)( \((\d+) images?\))?$", cells[1].text )
key = cells[0].text.lower()[:-1]
row_counts[ key ] = int( mo.group(1) )
if mo.group( 3 ):
row_counts[ key[:-1] + "_images" ] = int( mo.group(3) )
# unload the links
links = {}
table = find_child( "#db-report .db-links" )
last_key = None
for row in find_children( "tr", table ):
cells = find_children( "td", row )
if len(cells) == 2:
last_key = cells[0].text.lower()[:-1]
links[ last_key ] = [ int( cells[1].text ) , [] ]
else:
mo = re.search( r"^(.+) \((.+)\)$", cells[0].text )
tags = find_children( "a", cells[0] )
url = _fixup_url( tags[0].get_attribute( "href" ) )
links[ last_key ][1].append( ( mo.group(1), url, mo.group(2) ) )
# unload duplicate images
dupe_images = []
for row in find_children( "#db-report .dupe-analysis .dupe-image" ):
elem = find_child( ".caption .hash", row )
mo = re.search( r"^\(md5:(.+)\)$", elem.text )
image_hash = mo.group(1)
image_url = _fixup_url( find_child( "img", row ).get_attribute( "src" ) )
parents = []
for entry in find_children( ".collapsible li", row ):
url = _fixup_url( find_child( "a", entry ).get_attribute( "href" ) )
parents.append( ( entry.text, url ) )
dupe_images.append( list( itertools.chain(
[ image_hash, image_url ], parents
) ) )
# unload the image sizes
tab_ctrl = find_child( "#db-report .db-images .react-tabs" )
image_sizes = {}
for tab in find_children( ".react-tabs__tab", tab_ctrl ):
key = tab.text.lower()
tab_id = tab.get_attribute( "id" )
tab.click()
sel = ".react-tabs__tab-panel[aria-labelledby='{}'].react-tabs__tab-panel--selected".format( tab_id )
tab_page = wait_for( 2,
lambda: find_child( sel, tab_ctrl ) #pylint: disable=cell-var-from-loop
)
parents = []
for row_no, row in enumerate( find_children( "table.image-sizes tr", tab_page ) ):
if row_no == 0:
continue
cells = find_children( "td", row )
image_url = _fixup_url( find_child( "img", cells[0] ).get_attribute( "src" ) )
url = _fixup_url( find_child( "a", cells[2] ).get_attribute( "href" ) )
parents.append( ( cells[2].text, url, image_url ) )
if parents:
image_sizes[ key ] = parents
else:
assert tab_page.text == "No images found."
return row_counts, links, dupe_images, image_sizes
# ---------------------------------------------------------------------
def _fixup_url( url ):
"""Fixup a URL to make it independent of its server."""
url = re.sub( r"^http://[^/]+", "", url )
pos = url.find( "?" )
if pos >= 0:
url = url[:pos]
return url
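# For example (illustrative values only):
#   _fixup_url( "http://localhost:5001/article/123?key=val" ) -> "/article/123"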

@ -0,0 +1,74 @@
""" Test previewing images. """
import os
from selenium.common.exceptions import ElementClickInterceptedException
from asl_articles.search import SEARCH_ALL_PUBLISHERS, SEARCH_ALL_PUBLICATIONS, SEARCH_ALL_ARTICLES
from asl_articles.tests.test_publishers import create_publisher, edit_publisher
from asl_articles.tests.test_publications import create_publication, edit_publication
from asl_articles.tests.test_articles import create_article, edit_article
from asl_articles.tests.utils import init_tests, find_child, find_children, wait_for, \
do_search, get_search_results, call_with_retry
# ---------------------------------------------------------------------
def test_image_preview( webdriver, flask_app, dbconn ):
"""Test previewing images."""
# initialize
init_tests( webdriver, flask_app, dbconn )
def do_test( create, edit, refresh ):
# create a new object
webdriver.refresh()
create()
results = get_search_results()
assert len(results) == 1
sr = results[0]
# add images to the object
# NOTE: We're testing that images in an object already on-screen are updated correctly.
fname = os.path.join( os.path.split(__file__)[0], "fixtures/images/1.gif" )
description = 'foo <img src="/images/app.png" style="height:2em;" class="preview"> bar'
edit( sr, fname, description )
_check_previewable_images( sr )
# refresh the object
# NOTE: We're testing that images in an object loaded afresh are set up correctly.
webdriver.refresh()
wait_for( 2, lambda: find_child( "#search-form" ) )
results = refresh()
assert len(results) == 1
_check_previewable_images( results[0] )
# do the tests
do_test(
lambda: create_publisher( { "name": "Test publisher" } ),
lambda sr, fname, description: edit_publisher( sr, { "image": fname, "description": description } ),
lambda: do_search( SEARCH_ALL_PUBLISHERS )
)
do_test(
lambda: create_publication( { "name": "Test publication" } ),
lambda sr, fname, description: edit_publication( sr, { "image": fname, "description": description } ),
lambda: do_search( SEARCH_ALL_PUBLICATIONS )
)
do_test(
lambda: create_article( { "title": "Test article" } ),
lambda sr, fname, description: edit_article( sr, { "image": fname, "snippet": description } ),
lambda: do_search( SEARCH_ALL_ARTICLES )
)
# ---------------------------------------------------------------------
def _check_previewable_images( sr ):
"""Check that previewable images are working correctly."""
images = list( find_children( "a.preview img", sr ) )
assert len(images) == 2
for img in images:
assert find_child( ".jquery-image-zoom" ) is None
img.click()
preview = wait_for( 2, lambda: find_child( ".jquery-image-zoom" ) )
call_with_retry( preview.click, [ElementClickInterceptedException] )
wait_for( 2, lambda: find_child( ".jquery-image-zoom" ) is None )

@ -8,7 +8,7 @@ from asl_articles.models import Scenario
from asl_articles.tests.utils import init_tests
sys.path.append( os.path.join( os.path.split(__file__)[0], "../../tools/" ) )
from import_roar_scenarios import import_roar_scenarios
from import_roar_scenarios import import_roar_scenarios #pylint: disable=import-error,wrong-import-order
# ---------------------------------------------------------------------
@ -18,7 +18,8 @@ def test_import_roar_scenarios( dbconn ):
# initialize
session = init_tests( None, None, dbconn )
roar_fname = os.path.join( os.path.split(__file__)[0], "fixtures/roar-scenarios.json" )
roar_data = json.load( open( roar_fname, "r" ) )
with open( roar_fname, "r", encoding="utf-8" ) as fp:
roar_data = json.load( fp )
# do the first import
_do_import( dbconn, session, roar_fname,

@ -246,8 +246,9 @@ def test_images( webdriver, flask_app, dbconn ): #pylint: disable=too-many-state
btn = find_child( ".row.image .remove-image", dlg )
assert btn.is_displayed()
# make sure the publication's image is correct
resp = urllib.request.urlopen( image_url ).read()
assert resp == open( expected, "rb" ).read()
with urllib.request.urlopen( image_url ) as resp:
with open( expected, "rb" ) as fp:
assert resp.read() == fp.read()
else:
# make sure there is no image
img = find_child( ".row.image img.image", dlg )
@ -258,7 +259,8 @@ def test_images( webdriver, flask_app, dbconn ): #pylint: disable=too-many-state
# make sure the publication's image is not available
url = flask_app.url_for( "get_image", image_type="publication", image_id=pub_id )
try:
resp = urllib.request.urlopen( url )
with urllib.request.urlopen( url ):
pass
assert False, "Should never get here!"
except urllib.error.HTTPError as ex:
assert ex.code == 404
@ -318,7 +320,8 @@ def test_parent_publisher( webdriver, flask_app, dbconn ):
# check that the parent publisher was updated in the database
pub_id = sr.get_attribute( "testing--pub_id" )
url = flask_app.url_for( "get_publication", pub_id=pub_id )
pub = json.load( urllib.request.urlopen( url ) )
with urllib.request.urlopen( url ) as resp:
pub = json.load( resp )
if expected_parent:
if pub["publ_id"] != expected_parent[0]:
return None
@ -448,7 +451,7 @@ def test_clean_html( webdriver, flask_app, dbconn ):
# create a publication with HTML content
create_publication( {
"name": "name: <span style='boo!'> <b>bold</b> <xxx>xxx</xxx> <i>italic</i> {}".format( replace[0] ),
"name": "name: <span onclick='boo!'> <b>bold</b> <xxx>xxx</xxx> <i>italic</i> {}".format( replace[0] ),
"edition": "<i>2</i>",
"description": "bad stuff here: <script>HCF</script> {}".format( replace[0] )
}, toast_type="warning" )
@ -467,7 +470,7 @@ def test_clean_html( webdriver, flask_app, dbconn ):
# update the publication with new HTML content
edit_publication( sr, {
"name": "<div style='...'>updated</div>"
"name": "<div onclick='...'>updated</div>"
}, toast_type="warning" )
results = get_search_results()
assert len(results) == 1
@ -672,8 +675,11 @@ def test_default_image( webdriver, flask_app, dbconn ):
f: os.path.join( os.path.split(__file__)[0], "fixtures/images/"+f )
for f in images
}
def read_image_data( fname ):
with open( fname, "rb" ) as fp:
return fp.read()
image_data = {
f: open( image_fnames[f], "rb" ).read()
f: read_image_data( image_fnames[f] )
for f in images
}
@ -690,8 +696,8 @@ def test_default_image( webdriver, flask_app, dbconn ):
if img:
assert expected
image_url = img.get_attribute( "src" )
resp = urllib.request.urlopen( image_url ).read()
assert resp == image_data[ expected ]
with urllib.request.urlopen( image_url ) as resp:
assert resp.read() == image_data[ expected ]
else:
assert not expected

@ -176,8 +176,9 @@ def test_images( webdriver, flask_app, dbconn ): #pylint: disable=too-many-state
btn = find_child( ".row.image .remove-image", dlg )
assert btn.is_displayed()
# make sure the publisher's image is correct
resp = urllib.request.urlopen( image_url ).read()
assert resp == open(expected,"rb").read()
with urllib.request.urlopen( image_url ) as resp:
with open( expected, "rb" ) as fp:
assert resp.read() == fp.read()
else:
# make sure there is no image
img = find_child( ".row.image img.image", dlg )
@ -188,7 +189,8 @@ def test_images( webdriver, flask_app, dbconn ): #pylint: disable=too-many-state
# make sure the publisher's image is not available
url = flask_app.url_for( "get_image", image_type="publisher", image_id=publ_id )
try:
resp = urllib.request.urlopen( url )
with urllib.request.urlopen( url ):
pass
assert False, "Should never get here!"
except urllib.error.HTTPError as ex:
assert ex.code == 404
@ -352,7 +354,7 @@ def test_clean_html( webdriver, flask_app, dbconn ):
# create a publisher with HTML content
create_publisher( {
"name": "name: <span style='boo!'> <b>bold</b> <xxx>xxx</xxx> <i>italic</i> {}".format( replace[0] ),
"name": "name: <span onclick='boo!'> <b>bold</b> <xxx>xxx</xxx> <i>italic</i> {}".format( replace[0] ),
"description": "bad stuff here: <script>HCF</script> {}".format( replace[0] )
}, toast_type="warning" )
@ -368,7 +370,7 @@ def test_clean_html( webdriver, flask_app, dbconn ):
# update the publisher with new HTML content
edit_publisher( sr, {
"name": "<div style='...'>updated</div>"
"name": "<div onclick='...'>updated</div>"
}, toast_type="warning" )
results = get_search_results()
assert len(results) == 1
@ -399,7 +401,7 @@ def test_publication_lists( webdriver, flask_app, dbconn ):
publ_sr = find_search_result( publ_name, results )
pubs = find_child( ".collapsible", publ_sr )
if pub_name:
# check that the publisher appears in the publisher's search result
# check that the publication appears in the publisher's search result
assert find_child( ".caption", pubs ).text == "Publications:"
pubs = find_children( "li", pubs )
assert len(pubs) == 1

@ -104,7 +104,8 @@ def _check_scenarios( flask_app, all_scenarios, expected ):
# check the scenarios in the database
url = flask_app.url_for( "get_scenarios" )
scenarios = json.load( urllib.request.urlopen( url ) )
with urllib.request.urlopen( url ) as resp:
scenarios = json.load( resp )
assert set( _make_scenario_display_name(a) for a in scenarios.values() ) == all_scenarios
def _make_scenario_display_name( scenario ):

@ -1,6 +1,6 @@
""" Test search operations. """
from asl_articles.search import _load_search_aliases, _make_fts_query_string
from asl_articles.search import _load_search_aliases, _make_fts_query_string, _find_aslrb_ruleids
from asl_articles.search import SEARCH_ALL
from asl_articles.tests.test_publishers import create_publisher, edit_publisher
@ -169,10 +169,9 @@ def test_multiple_search_results( webdriver, flask_app, dbconn ):
init_tests( webdriver, flask_app, dbconn, fixtures="search.json" )
# do a search
_do_test_search( "#asl", [
"View From The Trenches",
_do_test_search( "#aslj", [
"ASL Journal (4)", "ASL Journal (5)",
"Hunting DUKWs and Buffalos", "'Bolts From Above", "Hit 'Em High, Or Hit 'Em Low"
"Hunting DUKWs and Buffalos", "'Bolts From Above", "Hit 'Em High, Or Hit 'Em Low", "The Jungle Isn't Neutral"
] )
# do some searches
@ -506,17 +505,45 @@ def test_special_searches( webdriver, flask_app, dbconn ):
# ---------------------------------------------------------------------
def test_make_fts_query_string():
"""Test generating FTS query strings."""
def test_author_aliases( webdriver, flask_app, dbconn ):
"""Test author aliases."""
# initialize
search_aliases = _load_search_aliases(
[ ( "aaa", "bbb ; ccc" ) ],
[ ( "mmp", "Multi-Man Publishing = Multiman Publishing" ) ]
)
# NOTE: We can't monkeypatch the author aliases table, since we might be talking to
# a remote Flask server not under our control (e.g. in a Docker container). Instead,
# we define the aliases we need in a test config file, which is always loaded.
init_tests( webdriver, flask_app, dbconn, fixtures="author-aliases.json" )
def do_test( author_names ):
# test each author in the alias group
expected = set( "By {}".format(a) for a in author_names )
for author_name in author_names:
# find the author's article
results = do_search( '"{}"'.format( author_name ) )
assert len(results) == 1
# click on the author's name
authors = find_children( ".author", results[0] )
assert len(authors) == 1
authors[0].click()
# check that we found all the articles by the aliased names
wait_for( 2, lambda: set( get_search_result_names() ) == expected )
# test author aliases
do_test( [ "Charles M. Jones", "Chuck Jones", "Charles Martin Jones" ] )
do_test( [ "Joseph Blow", "Joe Blow" ] )
do_test( [ "John Doe" ] )
# ---------------------------------------------------------------------
def test_make_fts_query_string():
"""Test generating FTS query strings."""
def do_test( query, expected ):
assert _make_fts_query_string( query, search_aliases ) == expected
assert _make_fts_query_string( query, {} ) == expected
# test some query strings
do_test( "", "" )
@ -550,10 +577,27 @@ def test_make_fts_query_string():
' foo " xyz " bar ',
'foo AND xyz AND bar'
)
do_test(
' foo " xyz 123 " bar ',
'foo AND "xyz 123" AND bar'
)
# NOTE: We used to handle this case, but it's debatable what the right thing to do is :-/
# do_test(
# ' foo " xyz 123 " bar ',
# 'foo AND "xyz 123" AND bar'
# )
# test some quoted phrases that wrap special characters
do_test( 'Mr. Jones', '"Mr." AND Jones' )
do_test( '"Mr. Jones"', '"Mr. Jones"' )
do_test( 'foo "Mr. Jones" bar', 'foo AND "Mr. Jones" AND bar' )
# test nested quoted phrases
# NOTE: This is important since searching for an author wraps their name in double quotes,
# so we need to be able to handle a quoted phrase (e.g. a nickname) within the name.
do_test( 'Joseph "Joey" Blow', 'Joseph AND "Joey" AND Blow' )
do_test( 'Joseph "Joey Joe" Blow', 'Joseph AND "Joey Joe" AND Blow' )
do_test( 'Joseph ""Joey"" Blow', 'Joseph AND ""Joey"" AND Blow' )
# NOTE: This one doesn't work properly, but no-one is going to be doing this :-/
# do_test( 'Joseph ""Joey Joe"" Blow', 'Joseph AND ""Joey Joe"" AND Blow' )
do_test( '"Joseph ""Joey"" Blow"', '"Joseph ""Joey"" Blow"' )
do_test( '"Joseph ""Joey Joe"" Blow"', '"Joseph ""Joey Joe"" Blow"' )
# test some incorrectly quoted phrases
do_test( '"', '' )
@ -568,18 +612,111 @@ def test_make_fts_query_string():
do_test( "foo OR bar", "foo OR bar" )
do_test( "(a OR b)", "(a OR b)" )
# test search aliases
do_test( "aaa", "(aaa OR bbb OR ccc)" )
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def test_search_aliases():
"""Test search aliases in query strings."""
# initialize
search_aliases = _load_search_aliases(
[ # one-way aliases
( "aa", "bbb ; cccc" ),
( "xXx", "x1 X2 ; x3"),
( "foo", "{FOO}" ),
( " foo bar ", " {FOO BAR} " ), # nb: spaces will be squashed and stripped
],
[ # two-way aliases
( " joe's nose ", " Joes Nose = Joseph's Nose " ) # nb: spaces will be squashed and stripped
]
)
def do_test( query, expected ):
assert _make_fts_query_string( query, search_aliases ) == expected
# test one-way aliases
do_test( "a", "a" )
do_test( "XaX", "XaX" )
do_test( "aa", "(aa OR bbb OR cccc)" )
do_test( 'abc "aa" xyz', "abc AND (aa OR bbb OR cccc) AND xyz" )
do_test( "XaaX", "XaaX" )
do_test( "aaa", "aaa" )
do_test( "XaaaX", "XaaaX" )
do_test( "bbb", "bbb" )
do_test( "ccc", "ccc" )
# test search aliases
do_test( "MMP", '("multi-man publishing" OR "multiman publishing" OR mmp)' )
do_test( "Xmmp", "Xmmp" )
do_test( "mmpX", "mmpX" )
do_test( "multi-man publishing", '"multi-man" AND publishing' )
do_test( 'abc "multi-man publishing" xyz',
'abc AND ("multi-man publishing" OR "multiman publishing" OR mmp) AND xyz'
do_test( "cccc", "cccc" )
# test one-way aliases with spaces in the replacement text
do_test( "XxX", '(xXx OR "x1 X2" OR x3)' )
do_test( "x1 X2", "x1 AND X2" )
# test one-way aliases with overlapping text in the keys ("foo" vs. "foo bar")
do_test( "foo bar", '("foo bar" OR "{FOO BAR}")' )
do_test( "abc foo bar xyz", 'abc AND ("foo bar" OR "{FOO BAR}") AND xyz' )
do_test( "Xfoo bar", "Xfoo AND bar" )
do_test( "foo barX", '(foo OR {FOO}) AND barX' )
do_test( "Xfoo barX", "Xfoo AND barX" )
# test two-way aliases
do_test( "joe's nose", '("joe\'\'s nose" OR "Joes Nose" OR "Joseph\'\'s Nose")' )
do_test( "abc joe's nose xyz", 'abc AND ("joe\'\'s nose" OR "Joes Nose" OR "Joseph\'\'s Nose") AND xyz' )
do_test( " JOES NOSE ", '("joe\'\'s nose" OR "Joes Nose" OR "Joseph\'\'s Nose")' )
do_test( "Xjoes nose ", "Xjoes AND nose" )
do_test( "joes noseX", "joes AND noseX" )
do_test( "Xjoes noseX", "Xjoes AND noseX" )
do_test( "Joseph's Nose", '("joe\'\'s nose" OR "Joes Nose" OR "Joseph\'\'s Nose")' )
# check that raw queries still have alias processing done
do_test( "foo AND bar", "(foo OR {FOO}) AND bar" )
# ---------------------------------------------------------------------
def test_aslrb_links():
"""Test creating links to the ASLRB."""
def do_test( snippet, expected ):
matches = _find_aslrb_ruleids( snippet )
if expected:
assert len(matches) == len(expected)
for match,exp in zip(matches,expected):
startpos, endpos, ruleid, caption = match
if isinstance( exp, str ):
assert exp == ruleid == caption
assert exp == snippet[ startpos : endpos ]
else:
assert isinstance( exp, tuple )
assert exp[0] == ruleid
assert exp[1] == caption
else:
assert matches == []
# test detecting ruleids
do_test( "A1.23", ["A1.23"] )
do_test( " A1.23 ", ["A1.23"] )
do_test( ".A1.23,", ["A1.23"] )
do_test( "xA1.23,", None )
do_test( "A1.23 B.4 C5. D6", ["A1.23","B.4"] )
do_test( "A1.23 B.4,C5.;D6", ["A1.23","B.4"] )
# test ruleid ranges
do_test( "A1.23-", ["A1.23"] )
do_test( "A1.23-4", [ ("A1.23","A1.23-4") ] )
do_test( "A1.23-45", [ ("A1.23","A1.23-45") ] )
do_test( "A1.23-.6", [ ("A1.23","A1.23-.6") ] )
do_test( "A1.23-.6.7", [ ("A1.23","A1.23-.6") ] )
# test manually created links
do_test( "A1.23 Z9.99",
[ "A1.23", "Z9.99" ]
)
do_test( "A1.23 {:D5.6|foo:} Z9.99",
[ "A1.23", ("D5.6","foo"), "Z9.99" ]
)
do_test( "A1.23 {:|foo:} Z9.99",
[ "A1.23", ("","foo"), "Z9.99" ]
)
# NOTE: Because the following manual link has no caption, it won't get detected as a manual link,
# and so the ruleid is detected as a normal ruleid.
do_test( "A1.23 {:D5.6|:} Z9.99",
[ "A1.23", "D5.6", "Z9.99" ]
)
# ---------------------------------------------------------------------

@ -1,11 +1,15 @@
""" Test the startup process. """
import pytest
import asl_articles.startup
from asl_articles.tests.utils import init_tests, wait_for, find_child, set_toast_marker, check_toast
from asl_articles.tests import pytest_options
# ---------------------------------------------------------------------
@pytest.mark.skipif( pytest_options.flask_url is not None, reason="Testing against a remote Flask server." )
def test_startup_messages( webdriver, flask_app, dbconn ):
"""Test startup messages."""

@ -145,10 +145,12 @@ def _check_tags( flask_app, expected ): #pylint: disable=too-many-locals
if sr.text.startswith( "publication" ):
pub_id = sr.get_attribute( "testing--pub_id" )
url = flask_app.url_for( "get_publication", pub_id=pub_id )
pub = json.load( urllib.request.urlopen( url ) )
with urllib.request.urlopen( url ) as resp:
pub = json.load( resp )
assert expected[ pub["pub_name"] ] == fixup_tags( pub["pub_tags"] )
elif sr.text.startswith( "article" ):
article_id = sr.get_attribute( "testing--article_id" )
url = flask_app.url_for( "get_article", article_id=article_id )
article = json.load( urllib.request.urlopen( url ) )
with urllib.request.urlopen( url ) as resp:
article = json.load( resp )
assert expected[ article["article_title"] ] == fixup_tags( article["article_tags"] )

@ -1,11 +1,12 @@
""" Helper utilities for the test suite. """
import os
import urllib.request
import json
import time
import itertools
import uuid
import base64
import logging
import sqlalchemy
import sqlalchemy.orm
@ -18,7 +19,6 @@ from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException, StaleElementReferenceException, TimeoutException
from asl_articles import search
from asl_articles.utils import to_bool
import asl_articles.models
@ -34,6 +34,7 @@ def init_tests( webdriver, flask_app, dbconn, **kwargs ):
global _webdriver, _flask_app
_webdriver = webdriver
_flask_app = flask_app
fixtures_dir = os.path.join( os.path.dirname( __file__ ), "fixtures/" )
# initialize the database
fixtures = kwargs.pop( "fixtures", None )
@ -45,6 +46,20 @@ def init_tests( webdriver, flask_app, dbconn, **kwargs ):
assert fixtures is None
session = None
# re-initialize the search engine
if flask_app:
url = flask_app.url_for( "init_search_for_test" )
with urllib.request.urlopen( url ) as resp:
_ = resp.read()
# initialize the documents directory
dname = kwargs.pop( "docs", None )
if dname:
flask_app.config[ "EXTERNAL_DOCS_BASEDIR" ] = os.path.join( fixtures_dir, dname )
else:
if flask_app:
flask_app.config.pop( "EXTERNAL_DOCS_BASEDIR", None )
# never highlight search results unless explicitly enabled
if "no_sr_hilite" not in kwargs:
kwargs[ "no_sr_hilite" ] = 1
@ -56,7 +71,10 @@ def init_tests( webdriver, flask_app, dbconn, **kwargs ):
if to_bool( kwargs.pop( "disable_confirm_discard_changes", True ) ):
kwargs[ "disable_confirm_discard_changes" ] = 1
webdriver.get( webdriver.make_url( "", **kwargs ) )
wait_for_elem( 2, "#search-form" )
# FUDGE! Since we switched from running the test Flask server with app.run() to make_server().serve_forever(),
# stopping and starting the server seems to be much quicker, but refreshing the page can be slower when
# running multiple tests :shrug:
wait_for_elem( 10, "#search-form" )
return session
@ -69,7 +87,8 @@ def load_fixtures( session, fname ):
if fname:
dname = os.path.join( os.path.split(__file__)[0], "fixtures/" )
fname = os.path.join( dname, fname )
data = json.load( open( fname, "r" ) )
with open( fname, "r", encoding="utf-8" ) as fp:
data = json.load( fp )
else:
data = {}
@ -85,9 +104,6 @@ def load_fixtures( session, fname ):
session.bulk_insert_mappings( model, data[table_name] )
session.commit()
# rebuild the search index
search.init_search( session, logging.getLogger("search") )
# ---------------------------------------------------------------------
def do_search( query ):
@ -122,13 +138,15 @@ def get_search_result_names( results=None ):
results = get_search_results()
return [ find_child( ".name", r ).text for r in results ]
def find_search_result( name, results=None ):
def find_search_result( name, results=None, wait=0 ):
"""Find a search result."""
if not results:
results = get_search_results()
results = [ r for r in results if find_child( ".name", r ).text == name ]
assert len(results) == 1
return results[0]
def find_sr():
matches = [
r for r in results or get_search_results()
if find_child( ".name", r ).text == name
]
return matches[0] if len(matches) == 1 else None
return wait_for( wait, find_sr )
def check_search_result( sr, check, expected ):
"""Check a search result in the UI."""
@ -305,21 +323,21 @@ def wait_for_not_elem( timeout, sel ):
def find_child( sel, parent=None ):
"""Find a child element."""
try:
return (parent if parent else _webdriver).find_element_by_css_selector( sel )
return (parent if parent else _webdriver).find_element( By.CSS_SELECTOR, sel )
except NoSuchElementException:
return None
def find_children( sel, parent=None ):
"""Find child elements."""
try:
return (parent if parent else _webdriver).find_elements_by_css_selector( sel )
return (parent if parent else _webdriver).find_elements( By.CSS_SELECTOR, sel )
except NoSuchElementException:
return None
def find_parent_by_class( elem, class_name ):
"""Find a parent element with the specified class."""
while True:
elem = elem.find_element_by_xpath( ".." )
elem = elem.find_element( By.XPATH, ".." )
if not elem:
return None
classes = set( elem.get_attribute( "class" ).split() )
@ -469,10 +487,23 @@ def get_article_row( dbconn, article_id, fields ):
# ---------------------------------------------------------------------
def call_with_retry( func, expected_exceptions, max_retries=10, delay=0.1 ):
"""Try to call a function, with retries if it fails."""
for _ in range(0,max_retries):
try:
return func()
except Exception as exc: #pylint: disable=broad-except
if type(exc) not in expected_exceptions: #pylint: disable=unidiomatic-typecheck
raise
time.sleep( delay )
continue
assert False
def change_image( dlg, fname ):
"""Click on an image to change it."""
# NOTE: This is a bit tricky since we started overlaying the image with the "remove image" icon :-/
data = base64.b64encode( open( fname, "rb" ).read() )
with open( fname, "rb" ) as fp:
data = base64.b64encode( fp.read() )
data = "{}|{}".format( os.path.split(fname)[1], data.decode("ascii") )
elem = find_child( ".row.image img.image", dlg )
_webdriver.execute_script( "arguments[0].scrollTo( 0, 0 )", find_child( ".MuiDialogContent-root", dlg ) )

@ -1,6 +1,7 @@
""" Helper utilities. """
import re
import configparser
import typing
import itertools
import logging
@ -38,17 +39,17 @@ def get_request_args( vals, arg_names, log=None ):
def clean_request_args( vals, fields, warnings, logger ):
"""Clean incoming data."""
cleaned = {}
for f in fields:
if f.endswith( "_url" ):
continue # nb: don't clean URL's
f = _parse_arg_name( f )[ 0 ]
if isinstance( vals[f], str ):
val2 = clean_html( vals[f] )
if val2 != vals[f]:
vals[f] = val2
cleaned[f] = val2
logger.debug( "Cleaned HTML: %s => %s", f, val2 )
warnings.append( "Some values had HTML cleaned up." )
return cleaned
if not isinstance( vals[f], str ):
continue
val2 = clean_html( vals[f] )
if val2 != vals[f]:
vals[f] = val2
logger.debug( "Cleaned HTML: %s => %s", f, val2 )
warnings.append( "Some values had HTML cleaned up." )
def _parse_arg_name( arg_name ):
"""Parse a request argument name."""
@ -56,15 +57,15 @@ def _parse_arg_name( arg_name ):
return ( arg_name[1:], True ) # required argument
return ( arg_name, False ) # optional argument
def make_ok_response( extras=None, updated=None, warnings=None ):
def make_ok_response( extras=None, record=None, warnings=None ):
"""Generate a Flask 'success' response."""
resp = { "status": "OK" }
if extras:
resp.update( extras )
if updated:
resp[ "updated" ] = updated
if record:
resp["record"] = record
if warnings:
resp[ "warnings" ] = list( set( warnings ) ) # nb: remove duplicate messages
resp["warnings"] = list( set( warnings ) ) # nb: remove duplicate messages
return jsonify( resp )
# ---------------------------------------------------------------------
@ -82,7 +83,9 @@ def clean_html( val, allow_tags=None, safe_attrs=None ): #pylint: disable=too-ma
# fixup smart quotes and dashes
def replace_chars( val, ch, targets ):
for t in targets:
if isinstance( t, typing.Pattern ):
# FUDGE! pylint is incorrectly flagging isinstance() when checking against typing.XXX.
# https://github.com/PyCQA/pylint/issues/3537
if isinstance( t, typing.Pattern ): #pylint: disable=isinstance-second-argument-not-valid-type
val = t.sub( ch, val )
else:
assert isinstance( t, str )
@ -126,6 +129,8 @@ def clean_html( val, allow_tags=None, safe_attrs=None ): #pylint: disable=too-ma
args[ "remove_unknown_tags" ] = None
if safe_attrs is None:
safe_attrs = _html_whitelists.get( "attrs" )
if safe_attrs:
safe_attrs.extend( lxml.html.defs.safe_attrs )
elif safe_attrs == []:
safe_attrs = [ "" ] # nb: this is how we remove everything :-/
if safe_attrs:
@ -194,6 +199,21 @@ def decode_tags( tags ):
# ---------------------------------------------------------------------
class AppConfigParser():
"""Wrapper around the standard ConfigParser."""
def __init__( self, fname ):
self._configparser = configparser.ConfigParser()
self._configparser.optionxform = str # preserve case for the keys :-/
self._configparser.read( fname )
def get_section( self, section_name ):
"""Read a section from the config."""
try:
return self._configparser.items( section_name )
except configparser.NoSectionError:
return []
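# For example, given a config file containing (hypothetical contents):
#   [Search Aliases]
#   mmp = Multi-Man Publishing = Multiman Publishing
# get_section( "Search Aliases" ) would return:
#   [ ( "mmp", "Multi-Man Publishing = Multiman Publishing" ) ]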
# ---------------------------------------------------------------------
def apply_attrs( obj, vals ):
"""Update an object's attributes."""
for k,v in vals.items():
@ -209,3 +229,7 @@ def to_bool( val ):
if val in ["no","false","disabled","0"]:
return False
return None
def squash_spaces( val ):
"""Squash multiple spaces down into a single space."""
return " ".join( val.split() )

@ -9,6 +9,7 @@ from urllib.error import URLError
import pytest
import flask
import werkzeug
import sqlalchemy
from flask_sqlalchemy import SQLAlchemy
import alembic
@ -17,10 +18,12 @@ import alembic.config
import asl_articles
from asl_articles import app
from asl_articles.utils import to_bool
from asl_articles.tests import utils
from asl_articles import tests as asl_articles_tests
_FLASK_SERVER_URL = ( "localhost", 5001 ) # nb: for the test Flask server we spin up
_pytest_options = None
# ---------------------------------------------------------------------
def pytest_addoption( parser ):
@ -60,6 +63,15 @@ def pytest_addoption( parser ):
help="Database connection string."
)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def pytest_configure( config ):
"""Called after command-line options have been parsed."""
global _pytest_options
_pytest_options = config.option
# notify the test suite about the pytest options
asl_articles_tests.pytest_options = _pytest_options
# ---------------------------------------------------------------------
@pytest.fixture( scope="session" )
@ -88,25 +100,28 @@ def flask_app( request ):
# the *configured* database connection string (since it will fail to start if there's a problem).
asl_articles._disable_db_startup = True #pylint: disable=protected-access
# yup - make it so
server = werkzeug.serving.make_server(
_FLASK_SERVER_URL[0], _FLASK_SERVER_URL[1],
app, threaded=True
)
thread = threading.Thread(
target = lambda: app.run(
host=_FLASK_SERVER_URL[0], port=_FLASK_SERVER_URL[1],
use_reloader=False
)
target = server.serve_forever,
daemon=True
)
thread.start()
# wait for the server to start up
def is_ready():
"""Try to connect to the Flask server."""
try:
resp = urllib.request.urlopen( app.url_for( "ping" ) ).read()
assert resp == b"pong"
url = app.url_for( "ping" )
with urllib.request.urlopen( url ) as resp:
assert resp.read() == b"pong"
return True
except URLError:
return False
except Exception as ex: #pylint: disable=broad-except
assert False, "Unexpected exception: {}".format( ex )
utils.wait_for( 5, is_ready )
asl_articles_tests.utils.wait_for( 5, is_ready )
# return the server to the caller
try:
@ -114,7 +129,7 @@ def flask_app( request ):
finally:
# shutdown the local Flask server
if not flask_url:
urllib.request.urlopen( app.url_for("shutdown") ).read()
server.shutdown()
thread.join()
# ---------------------------------------------------------------------
@ -131,14 +146,12 @@ def webdriver( request ):
options = wb.FirefoxOptions()
if headless:
options.add_argument( "--headless" ) #pylint: disable=no-member
driver = wb.Firefox(
options = options,
service_log_path = os.path.join( tempfile.gettempdir(), "geckodriver.log" )
)
driver = wb.Firefox( options=options )
elif driver == "chrome":
options = wb.ChromeOptions()
if headless:
options.add_argument( "--headless" ) #pylint: disable=no-member
options.add_argument( "--disable-gpu" )
driver = wb.Chrome( options=options )
else:
raise RuntimeError( "Unknown webdriver: {}".format( driver ) )

@ -1,38 +1,31 @@
# Set the TAG environment variable to set the image tag e.g.
# TAG=testing docker-compose build ...
#
# Set the ENABLE_TESTS build argument to support test functionality in the containers e.g.
# docker-compose build --build-arg ENABLE_TESTS=1
#
# DBCONN is a database connection string that is passed straight through to the Flask container.
# However, if we're talking to a SQLite database, we also need to mount the file into the container.
# This is done via the SQLITE variable, but since Docker doesn't allow any way to do things conditionally,
# it needs to be set even if it's not being used :-/
#
# Similarly, EXTERNAL_DOCS_BASEDIR is the base directory for external documents that we want to link to,
# but it needs to be set even if it's not being used :-/
#
# See the run-containers.sh script that manages all of this.
# IMPORTANT: Use run-containers.sh to set up the necessary environment variables.
version: "3"
version: "3.4"
services:
web:
image: asl-articles-web:$TAG
build: web
build:
context: web
network: $BUILD_NETWORK
ports:
- "3002:80"
- $WEB_PORTNO:80
flask:
image: asl-articles-flask:$TAG
build:
context: .
network: $BUILD_NETWORK
args:
ENABLE_TESTS: $ENABLE_TESTS
ports:
- "5002:5000"
- $FLASK_PORTNO:5000
volumes:
- $SQLITE:/data/sqlite.db
- $EXTERNAL_DOCS_BASEDIR:/data/docs/
- $USER_FILES_BASEDIR:/data/user-files/
- $AUTHOR_ALIASES:/app/asl_articles/config/author-aliases.cfg
environment:
- DBCONN
- EXTERNAL_DOCS_BASEDIR
- USER_FILES_BASEDIR
- ASLRB_BASE_URL

@ -4,3 +4,4 @@ FLASK_HOST = 0.0.0.0
IS_CONTAINER = 1
EXTERNAL_DOCS_BASEDIR = /data/docs/
USER_FILES_BASEDIR = /data/user-files/

@ -1,5 +1,5 @@
pytest==5.2.2
selenium==3.141.0
pylint==2.4.3
pylint-flask-sqlalchemy==0.1.0
pytest-pylint==0.14.1
pytest==7.1.2
selenium==4.2.0
pylint==2.14.1
pylint-flask-sqlalchemy==0.2.0
pytest-pylint==0.18.0

@ -1,8 +1,9 @@
# python 3.7.5
# python 3.10.4
flask==1.1.1
flask-sqlalchemy==2.4.1
psycopg2-binary==2.8.4
alembic==1.3.1
pyyaml==5.1.2
lxml==4.4.2
flask==2.1.2
flask-sqlalchemy==2.5.1
psycopg2-binary==2.9.3
alembic==1.8.0
pyyaml==6.0
lxml==4.9.0
waitress==2.1.2

@ -1,60 +1,170 @@
#!/usr/bin/env bash
# Helper script that builds and launches the Docker containers.
# parse the command-line arguments
if [ -z "$1" ]; then
echo "Usage: `basename "$0"` <db-conn> <external-docs>"
echo " Build and launch the \"asl-articles\" containers, using the specified database e.g."
echo " ~/asl-articles.db (path to a SQLite database)"
echo " postgresql://USER:PASS@host/dbname (database connection string)"
echo " Note that the database server address is relative to the container i.e. NOT \"localhost\"."
echo
echo " If you want link articles to their original documents, specify a base directory for the documents."
# ---------------------------------------------------------------------
function print_help {
echo "`basename "$0"` {options}"
echo " Build and launch the \"asl-articles\" containers."
echo
echo " The TAG env variable can also be set to specify which containers to run e.g."
echo " TAG=testing ./run.sh /tmp/asl-articles.db"
echo " -t --tag Docker container tag e.g. \"testing\" or \"latest\"."
echo " -d --dbconn Database connection string e.g."
echo " ~/asl-articles.db (path to a SQLite database)"
echo " postgresql://USER:PASS@host/dbname (database connection string)"
echo " Note that the database server address is relative to the container i.e. NOT \"localhost\"."
echo " --web-portno Webapp port number."
echo " --flask-portno Flask backend server port number."
echo " -e --extdocs Base directory for external documents (to allow articles to link to them)."
echo " -u --user-files Base directory for user files."
echo " -r --aslrb Base URL for an eASLRB."
echo " -a --author-aliases Author aliases config file (see config/author-aliases.cfg.example)."
echo " --no-build Launch the containers as they are (i.e. without rebuilding them first)."
echo " --build-network Docker network to use when building the container."
}
# ---------------------------------------------------------------------
# initialize
cd `dirname "$0"`
export TAG=
export DBCONN=
export SQLITE=
export WEB_PORTNO=3002
export FLASK_PORTNO=5002
export EXTERNAL_DOCS_BASEDIR=
export USER_FILES_BASEDIR=
export ASLRB_BASE_URL=
export AUTHOR_ALIASES=
export ENABLE_TESTS=
NO_BUILD=
export BUILD_NETWORK=
export DOCKER_UID=$(id -u)
export DOCKER_GID=$(id -g)
# parse the command-line arguments
if [ $# -eq 0 ]; then
print_help
exit 0
fi
if [ -f "$1" ]; then
params="$(getopt -o t:d:e:u:r:a:h -l tag:,dbconn:,web-portno:,flask-portno:,extdocs:,user-files:,aslrb:,author-aliases:,no-build,build-network:,help --name "$0" -- "$@")"
if [ $? -ne 0 ]; then exit 1; fi
eval set -- "$params"
while true; do
case "$1" in
-t | --tag )
TAG=$2
shift 2 ;;
-d | --dbconn )
DBCONN=$2
shift 2 ;;
--web-portno )
WEB_PORTNO=$2
shift 2 ;;
--flask-portno )
FLASK_PORTNO=$2
shift 2 ;;
-e | --extdocs )
EXTERNAL_DOCS_BASEDIR=$2
shift 2 ;;
-u | --user-files )
USER_FILES_BASEDIR=$2
shift 2 ;;
-r | --aslrb )
ASLRB_BASE_URL=$2
shift 2 ;;
-a | --author-aliases )
AUTHOR_ALIASES=$2
shift 2 ;;
--no-build )
NO_BUILD=1
shift 1 ;;
--build-network )
# FUDGE! We sometimes can't get out to the internet from the container (DNS problems) using the default
# "bridge" network, so we offer the option of using an alternate network (e.g. "host").
BUILD_NETWORK=$2
shift 2 ;;
-h | --help )
print_help
exit 0 ;;
-- ) shift ; break ;;
* )
echo "Unknown option: $1" >&2
exit 1 ;;
esac
done
# prepare the database connection string
if [ -z "$DBCONN" ]; then
echo "No database was specified."
exit 3
fi
if [ -f "$DBCONN" ]; then
# connect to a SQLite database
export SQLITE=$1
export DBCONN=sqlite:////data/sqlite.db
SQLITE=$DBCONN
DBCONN=sqlite:////data/sqlite.db
else
# FUDGE! We pass the database connection string (DBCONN) through to the container,
# but this needs to be set, even if it's not being used :-/
SQLITE=/dev/null
fi
# initialize for testing
if [ "$TAG" == "testing" ]; then
echo -e "*** WARNING! Test mode is enabled! ***\n"
ENABLE_TESTS=1
else
# pass the database connection string through to the container
export SQLITE=/dev/null
export DBCONN=$1
if [ -z "$TAG" ]; then
TAG=latest
fi
fi
if [ ! -z "$2" ]; then
# set the base directory for external documents
export EXTERNAL_DOCS_BASEDIR=$2
# check the external documents directory
if [ -n "$EXTERNAL_DOCS_BASEDIR" ]; then
if [ ! -d "$EXTERNAL_DOCS_BASEDIR" ]; then
echo "Invalid document base directory: $EXTERNAL_DOCS_BASEDIR"
echo "Can't find the external documents base directory: $EXTERNAL_DOCS_BASEDIR"
exit 1
fi
else
# FUDGE! This needs to be set, even if it's not being used :-/
export EXTERNAL_DOCS_BASEDIR=/dev/null
EXTERNAL_DOCS_BASEDIR=/dev/null
fi
# initialize
if [ "$TAG" == "testing" ]; then
echo "*** WARNING! Special test functionality is enabled."
export ENABLE_TESTS=1
elif [ "$TAG" == "prod" ]; then
export ENABLE_TESTS=
# check the user files directory
if [ -n "$USER_FILES_BASEDIR" ]; then
if [ ! -d "$USER_FILES_BASEDIR" ]; then
echo "Can't find the user files base directory: $USER_FILES_BASEDIR"
exit 1
fi
else
export ENABLE_TESTS=
export TAG=latest
# FUDGE! This needs to be set, even if it's not being used :-/
USER_FILES_BASEDIR=/dev/null
fi
# check the author aliases
if [ -n "$AUTHOR_ALIASES" ]; then
if [ ! -f "$AUTHOR_ALIASES" ]; then
echo "Can't find the author aliases config file: $AUTHOR_ALIASES"
exit 1
fi
else
# FUDGE! This needs to be set, even if it's not being used :-/
AUTHOR_ALIASES=/dev/null
fi
# build the containers
echo Building the \"$TAG\" containers...
docker-compose build --build-arg ENABLE_TESTS=$ENABLE_TESTS 2>&1 \
| sed -e 's/^/ /'
if [ $? -ne 0 ]; then exit 10 ; fi
echo
if [ -z "$NO_BUILD" ]; then
echo Building the \"$TAG\" containers...
docker-compose build --build-arg ENABLE_TESTS=$ENABLE_TESTS 2>&1 \
| sed -e 's/^/ /'
if [ ${PIPESTATUS[0]} -ne 0 ]; then exit 10 ; fi
echo
fi
# launch the containers
echo Launching the \"$TAG\" containers...
if [ -n "$ENABLE_TESTS" ]; then
echo " *** TEST MODE ***"
fi
docker-compose up --detach 2>&1 \
| sed -e 's/^/ /'
exit ${PIPESTATUS[0]}

@ -2,8 +2,13 @@
""" Run the Flask backend server. """
import os
import threading
import urllib.request
import time
import glob
from flask import url_for
# ---------------------------------------------------------------------
# monitor extra files for changes
@ -21,11 +26,49 @@ for fspec in ["config","static","templates"] :
files = glob.glob( fspec )
extra_files.extend( files )
# run the server
# initialize
from asl_articles import app
app.run(
host = app.config.get( "FLASK_HOST", "localhost" ),
port = app.config.get( "FLASK_PORT_NO" ),
debug = app.config.get( "FLASK_DEBUG", False ),
extra_files = extra_files
)
flask_host = app.config.get( "FLASK_HOST", "localhost" )
flask_port = app.config.get( "FLASK_PORT_NO", 5000 )
flask_debug = app.config.get( "FLASK_DEBUG", False )
# FUDGE! Startup can take some time (e.g. because we have to build the search index over a large database),
# and since we do that on the first request, the server would otherwise sit there idling until that request
# arrives, when it could be doing the startup initialization, and the user would then have to wait
# when they eventually make that first request.
# We fix this by making the first request ourselves here (something harmless).
def _force_init():
time.sleep( 5 )
try:
# figure out the URL for the request we're going to make
with app.test_request_context() as req:
url = url_for( "ping" )
host = req.request.host_url
if host.endswith( "/" ):
host = host[:-1]
url = "{}:{}{}".format( host, flask_port, url )
# make the request
with urllib.request.urlopen( url ) as resp:
_ = resp.read()
except Exception as ex: #pylint: disable=broad-except
print( "WARNING: Startup ping failed: {}".format( ex ) )
threading.Thread( target=_force_init ).start()
# run the server
if flask_debug:
# NOTE: It's useful to run the webapp using the Flask development server, since it will
# automatically reload itself when the source files change.
app.run(
host=flask_host, port=flask_port,
debug=flask_debug,
extra_files=extra_files
)
else:
import waitress
# FUDGE! Browsers tend to send a max. of 6-8 concurrent requests per server, so we increase
# the number of worker threads to avoid task queue warnings :-/
nthreads = app.config.get( "WAITRESS_THREADS", 8 )
waitress.serve( app,
host=flask_host, port=flask_port,
threads=nthreads
)

@ -16,20 +16,22 @@ def parse_requirements( fname ):
"""Parse a requirements file."""
lines = []
fname = os.path.join( os.path.split(__file__)[0], fname )
for line in open(fname,"r"):
line = line.strip()
if line == "" or line.startswith("#"):
continue
lines.append( line )
with open( fname, "r", encoding="utf-8" ) as fp:
for line in fp:
line = line.strip()
if line == "" or line.startswith("#"):
continue
lines.append( line )
return lines
# ---------------------------------------------------------------------
setup(
name = "asl-articles",
version = "0.1", # nb: also update constants.py
version = "1.1", # nb: also update constants.py
description = "Searchable index of ASL articles.",
license = "AGPLv3",
url = "https://code.pacman-ghost.com/public/asl-articles",
packages = find_packages(),
install_requires = parse_requirements( "requirements.txt" ),
extras_require = {

@ -62,7 +62,8 @@ def import_roar_scenarios( dbconn, roar_data, progress=None ):
# load the ROAR scenarios
if isinstance( roar_data, str ):
log_progress( "Loading scenarios: {}", roar_data )
roar_data = json.load( open( roar_data, "r" ) )
with open( roar_data, "r", encoding="utf-8" ) as fp:
roar_data = json.load( fp )
else:
assert isinstance( roar_data, dict )
log_progress( "- Last updated: {}".format( roar_data.get("_lastUpdated_","(unknown)") ) )

@ -1 +1,10 @@
node_modules
*
# NOTE: docker-compose doesn't allow spaces after the !'s :-/
!package.json
!src/
!public/
!docker/

@ -1,19 +1,20 @@
# NOTE: Multi-stage builds require Docker v17.05 or later.
# create the build environment
FROM node:8.16.2-alpine AS build
FROM node:18-alpine3.15 AS build
WORKDIR /app
ENV PATH /app/node_modules/.bin:$PATH
RUN npm install react-scripts@3.2.0 -g
RUN npm install react-scripts@5.0.1 --location=global
COPY package.json /app/package.json
RUN npm install
COPY . /app/
ARG ENABLE_TESTS
COPY docker/env /app/.env
RUN if [ -n "$ENABLE_TESTS" ]; then echo -e "\nREACT_APP_TEST_MODE=1" >>/app/.env ; fi
RUN npm run build
# create the final target image
FROM nginx:1.17.5-alpine
FROM nginx:1.21.6-alpine
COPY docker/nginx-default.conf /etc/nginx/conf.d/default.conf
COPY --from=build /app/build /usr/share/nginx/html
EXPOSE 80

@ -0,0 +1,3 @@
# This file will be copied into the container as the React .env file.
REACT_APP_FLASK_URL = http://localhost:5000

@ -8,10 +8,20 @@ server {
try_files $uri $uri/ /index.html ;
}
# nb: we access the backend server via the network bridge that docker-compose creates for us
location /api {
# nb: we access the backend server via the network bridge that docker-compose creates for us
proxy_pass http://flask:5000 ;
rewrite ^/api/(.*) /$1 break ;
# NOTE: The frontend and backend containers will be running on the same host. If we can't connect
# within a few seconds, something's wrong and there's nothing to be gained by having longer timeouts.
proxy_connect_timeout 10 ;
proxy_read_timeout 10 ;
proxy_send_timeout 10 ;
}
location /user {
proxy_pass http://flask:5000 ;
rewrite ^/user/(.*) /user-files/$1 break ;
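# e.g. a request for /user/foo.pdf (illustrative path) is proxied through to /user-files/foo.pdf on the Flask server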
}
error_page 500 502 503 504 /50x.html ;

web/package-lock.json (generated, 37270 changed lines): diff suppressed because it is too large.

@ -6,17 +6,21 @@
"@material-ui/core": "^4.7.0",
"@reach/menu-button": "^0.7.2",
"axios": "^0.19.0",
"babel-runtime": "^6.26.0",
"http-proxy-middleware": "^0.20.0",
"jquery": "^3.4.1",
"lodash.clone": "^4.5.0",
"lodash.clonedeep": "^4.5.0",
"lodash.isequal": "^4.5.0",
"query-string": "^7.1.1",
"react": "^16.11.0",
"react-dom": "^16.11.0",
"react-drag-listview": "^0.1.6",
"react-draggable": "^4.1.0",
"react-router-dom": "^5.1.2",
"react-scripts": "3.2.0",
"react-scripts": "5.0.1",
"react-select": "^3.0.8",
"react-tabs": "^3.2.3",
"react-toastify": "^5.4.1"
},
"scripts": {


@ -0,0 +1,48 @@
div.jquery-image-zoom {
line-height: 0;
font-size: 0;
z-index: 10;
border: 5px solid #fff;
background: #eee; /* TM 25jan15: Added this to make it easier to see images with transparent backgrounds. */
margin: -5px;
-webkit-box-shadow: 0 0 10px rgba(0, 0, 0, 0.3);
-moz-box-shadow: 0 0 10px rgba(0, 0, 0, 0.3);
box-shadow: 0 0 10px rgba(0, 0, 0, 0.3);
}
div.jquery-image-zoom a {
background: url(/jQuery/imageZoom/jquery.imageZoom.png) no-repeat;
display: block;
width: 25px;
height: 25px;
position: absolute;
left: -17px;
top: -17px;
/* IE users are probably used to the close-link being in the right-hand corner */
*left: auto;
*right: -17px;
text-decoration: none;
text-indent: -100000px;
outline: 0;
z-index: 11;
}
div.jquery-image-zoom a:hover {
background-position: left -25px;
}
div.jquery-image-zoom img,
div.jquery-image-zoom embed,
div.jquery-image-zoom object,
div.jquery-image-zoom div {
width: 100%;
height: 100%;
margin: 0;
}

@ -0,0 +1,195 @@
/***
@title:
Image Zoom
@version:
2.0
@author:
Andreas Lagerkvist
@date:
2008-08-31
@url:
http://andreaslagerkvist.com/jquery/image-zoom/
@license:
http://creativecommons.org/licenses/by/3.0/
@copyright:
2008 Andreas Lagerkvist (andreaslagerkvist.com)
@requires:
jquery, jquery.imageZoom.css, jquery.imageZoom.png
@does:
This plug-in makes links pointing to images open in the "Image Zoom". Clicking a link will zoom out the clicked image to its target-image. Click anywhere on the image or the close-button to zoom the image back in. Only ~3k minified.
@howto:
jQuery(document.body).imageZoom(); Would make every link pointing to an image in the document open in the zoom.
@exampleHTML:
<ul>
<li><a href="http://exscale.se/__files/3d/bloodcells.jpg">Bloodcells</a></li>
<li><a href="http://exscale.se/__files/3d/x-wing.jpg">X-Wing</a></li>
<li><a href="http://exscale.se/__files/3d/weve-moved.jpg">We've moved</a></li>
</ul>
<ul>
<li><a href="http://exscale.se/__files/3d/lamp-and-mates/lamp-and-mates-01.jpg"><img src="http://exscale.se/__files/3d/lamp-and-mates/lamp-and-mates-01_small.jpg" alt="Lamp and Mates" /></a></li>
<li><a href="http://exscale.se/__files/3d/stugan-winter.jpg"><img src="http://exscale.se/__files/3d/stugan-winter_small.jpg" alt="The Cottage - Winter time" /></a></li>
<li><a href="http://exscale.se/__files/3d/ps2.jpg"><img src="http://exscale.se/__files/3d/ps2_small.jpg" alt="PS2" /></a></li>
</ul>
@exampleJS:
// I don't run it because my site already uses imgZoom
// jQuery(document.body).imageZoom();
***/
jQuery.fn.imageZoom = function (conf) {
// Some config. If you set dontFadeIn: 0 and hideClicked: 0 imgzoom will act exactly like fancyzoom
var config = jQuery.extend({
speed: 200, // Animation-speed of zoom
dontFadeIn: 1, // 1 = Do not fade in, 0 = Do fade in
hideClicked: 1, // Whether to hide the image that was clicked to bring up the imgzoom
imageMargin: 30, // Margin from image-edge to window-edge if image is larger than screen
className: 'jquery-image-zoom',
loading: 'Loading...'
}, conf);
config.doubleSpeed = config.speed / 4; // Used for fading in the close-button
return this.click(function(e) {
// Make sure the target-element is a link (or an element inside a link)
var clickedElement = jQuery(e.target); // The element that was actually clicked
var clickedLink = clickedElement.is('a') ? clickedElement : clickedElement.parents('a'); // If it's not an a, check if any of its parents is
// TM MAR/20: Removed the check on the filename extension (it was looking for an image-type extension).
clickedLink = (clickedLink && clickedLink.is('a')) ? clickedLink : false; // If it was an a or a child of an a, use it
var clickedImg = (clickedLink && clickedLink.find('img').length) ? clickedLink.find('img') : false; // See if the clicked link contains an image
// Only continue if a link pointing to an image was clicked
if (clickedLink) {
// These functions are used when the image starts and stops loading (displays either 'loading...' or fades out the clicked img slightly)
clickedLink.oldText = clickedLink.text();
clickedLink.setLoadingImg = function () {
if (clickedImg) {
clickedImg.css({opacity: '0.5'});
}
else {
clickedLink.text(config.loading);
}
};
clickedLink.setNotLoadingImg = function () {
if (clickedImg) {
clickedImg.css({opacity: '1'});
}
else {
clickedLink.text(clickedLink.oldText);
}
};
// The URI to the image we are going to display
var displayImgSrc = clickedLink.attr('href');
// If an imgzoom with this image is already open, don't do anything
if (jQuery('div.' + config.className + ' img[src="' + displayImgSrc + '"]').length) {
return false;
}
// This function is run once the displayImgSrc-img has loaded (below)
var preloadOnload = function (pload) {
// The clicked-link is faded out during loading, fade it back in
clickedLink.setNotLoadingImg();
// Now set some vars we need
var dimElement = clickedImg ? clickedImg : clickedLink; // The element used to retrieve dimensions of imgzoom before zoom (either clicked link or img inside)
var hideClicked = clickedImg ? config.hideClicked : 0; // Whether to hide clicked link (set in config but always true for non-image-links)
var offset = dimElement.offset(); // Offset of clicked link (or image inside)
var imgzoomBefore = { // The dimensions of the imgzoom _before_ it is zoomed out
width: dimElement.outerWidth(),
height: dimElement.outerHeight(),
left: offset.left,
top: offset.top/*,
opacity: config.dontFadeIn*/
};
var imgzoom = jQuery('<div><img src="' + displayImgSrc + '" alt=""/></div>').css('position', 'absolute').appendTo(document.body); // We don't want any class-name or any other contents apart from the image when we calculate the new dimensions of the imgzoom
var imgzoomAfter = { // The dimensions of the imgzoom _after_ it is zoomed out
width: pload.width,
height: pload.height/*,
opacity: 1*/
};
var windowDim = {
width: jQuery(window).width(),
height: jQuery(window).height()
};
// Make sure imgzoom isn't wider than screen
if (imgzoomAfter.width > (windowDim.width - config.imageMargin * 2)) {
var nWidth = windowDim.width - config.imageMargin * 2;
imgzoomAfter.height = (nWidth / imgzoomAfter.width) * imgzoomAfter.height;
imgzoomAfter.width = nWidth;
}
// Now make sure it isn't taller
if (imgzoomAfter.height > (windowDim.height - config.imageMargin * 2)) {
var nHeight = windowDim.height - config.imageMargin * 2;
imgzoomAfter.width = (nHeight / imgzoomAfter.height) * imgzoomAfter.width;
imgzoomAfter.height = nHeight;
}
// Center imgzoom
imgzoomAfter.left = (windowDim.width - imgzoomAfter.width) / 2 + jQuery(window).scrollLeft();
imgzoomAfter.top = (windowDim.height - imgzoomAfter.height) / 2 + jQuery(window).scrollTop();
var closeButton = jQuery('<a href="#">Close</a>').appendTo(imgzoom).hide(); // The button that closes the imgzoom (we're adding this after the calculation of the dimensions)
// Hide the clicked link if set so in config
if (hideClicked) {
clickedLink.css('visibility', 'hidden');
}
// Now animate the imgzoom from its small size to its large size, and then fade in the close-button
imgzoom.addClass(config.className).css(imgzoomBefore).animate(imgzoomAfter, config.speed, function () {
closeButton.fadeIn(config.doubleSpeed);
});
// This function closes the imgzoom
var hideImgzoom = function () {
closeButton.fadeOut(config.doubleSpeed, function () {
imgzoom.animate(imgzoomBefore, config.speed, function () {
clickedLink.css('visibility', 'visible');
imgzoom.remove();
});
});
return false;
};
// Close imgzoom when you click the closeButton or the imgzoom
imgzoom.click(hideImgzoom);
closeButton.click(hideImgzoom);
};
// Preload image
var preload = new Image();
preload.src = displayImgSrc;
if (preload.complete) {
preloadOnload(preload);
}
else {
clickedLink.setLoadingImg();
preload.onload = function () {
preloadOnload(preload);
};
}
// Finally return false from the click so the browser doesn't actually follow the link...
return false;
}
});
};
// NOTE: We used to close up on ESC, but we want to do this on *any* keypress (e.g. if the user
// starts typing in the search query box) or click (e.g. if the user clicks on a menu).
$(document).keydown( () => { $("div.jquery-image-zoom a").click() ; } ) ;
$(document).click( () => { $("div.jquery-image-zoom a").click() ; } ) ;

@ -1,5 +1,6 @@
#header { position: absolute ; top: 5px ; left: 5px ; right: 5px ; height: 65px ; }
#search-results { position: absolute ; top: 95px ; bottom: 0 ; left: 5px ; right: 5px ; overflow: auto ; }
#search-results, #db-report { position: absolute ; top: 95px ; bottom: 5px ; left: 5px ; right: 5px ; overflow: auto ; }
#db-report { z-index: 10 ; }
#header { border: 1px solid #ccc ; background: #eee ; border-top-right-radius: 10px ; padding: 5px 5px 10px 5px ; }
#header .logo { float: left ; height: 70px ; }
@ -7,13 +8,23 @@
#menu-button--app { position: absolute ; top: 10px ; right: 10px ;
width: 30px ; height: 30px ;
background: url("/images/main-menu.png") transparent no-repeat ; background-size: 100% ; border: none ;
background: url("/public/images/main-menu.png") transparent no-repeat ; background-size: 100% ; border: none ;
cursor: pointer ;
}
[data-reach-menu] { z-index: 999 ; }
[data-reach-menu-list] { padding: 5px ; }
[data-reach-menu-item] { padding: 5px ; }
[data-reach-menu-item][data-selected] { background: #90caf9 ; color: black ; }
[data-reach-menu-item] { display: flex ; height: 1.25em ; padding: 5px ; }
[data-reach-menu-item][data-selected] { background: #b0e0ff ; color: black ; }
[data-reach-menu-list] .divider { height: 0 ; margin: 2px 0 ; border-top: 1px solid #aaa ; }
[data-reach-menu-list] img { height: 100% ; margin-top: -0.1em ; margin-right: 0.5em ; }
#watermark {
position: fixed ; right: 0 ; bottom: 0 ;
height: 30% ;
opacity: 0 ; z-index: -999 ;
transition: opacity 5s ;
}
#watermark img { height: 100% ; }
.MuiDialogTitle-root { padding: 10px 16px 6px 16px !important ; }
.MuiDialogContent-root>div { margin-bottom: 1em ; }
@ -38,4 +49,8 @@
img#loading { position: fixed ; top: 50% ; left: 50% ; margin-top: -16px ; margin-left: -16px ; }
.collapsible .caption img { height: 0.75em ; margin-left: 0.25em ; }
.collapsible .count { font-size: 80% ; font-style: italic ; color: #666 ; }
.collapsible .more { font-size: 80% ; font-style: italic ; color: #666 ; cursor: pointer ; }
.monospace { margin-top: 0.5em ; font-family: monospace ; font-style: italic ; font-size: 80% ; }

@ -10,14 +10,18 @@ import { SearchResults } from "./SearchResults" ;
import { PublisherSearchResult } from "./PublisherSearchResult" ;
import { PublicationSearchResult } from "./PublicationSearchResult" ;
import { ArticleSearchResult } from "./ArticleSearchResult" ;
import { DbReport } from "./DbReport";
import ModalForm from "./ModalForm";
import AskDialog from "./AskDialog" ;
import { makeSmartBulletList } from "./utils.js" ;
import { DataCache } from "./DataCache" ;
import { PreviewableImage } from "./PreviewableImage" ;
import { makeSmartBulletList, isLink } from "./utils.js" ;
import { APP_NAME } from "./constants.js" ;
import "./App.css" ;
const axios = require( "axios" ) ;
const queryString = require( "query-string" ) ;
window.$ = window.jQuery = require( "jquery" ) ;
export let gAppRef = null ;
@ -32,13 +36,17 @@ export class App extends React.Component
this.state = {
searchResults: [],
searchSeqNo: 0,
showDbReport: false,
modalForm: null,
askDialog: null,
startupTasks: [ "caches.publishers", "caches.publications", "caches.authors", "caches.scenarios", "caches.tags" ],
startupTasks: [ "dummy" ], // FUDGE! We need at least one startup task.
} ;
gAppRef = this ;
this.setWindowTitle( null ) ;
// initialize the data cache
this.dataCache = new DataCache() ;
// initialize
this.args = queryString.parse( window.location.search ) ;
this._storeMsgs = this.isTestMode() && this.args.store_msgs ;
@ -66,6 +74,14 @@ export class App extends React.Component
// This also has the nice side-effect of removing CORS issues :-/
this._flaskBaseUrl = "/api" ;
}
// NOTE: Managing publisher/publication/article images is a bit tricky, since they are accessed via a URL
// such as "/articles/images/123", so if the user uploads a new image, the browser has no way of knowing
// that it can't use what's in its cache and must get a new one. We can add something to the URL to force
// a reload (e.g. "?foo=" + Math.random()), but this forces the image to be reloaded *every* time, which is
// pretty inefficient.
// Instead, we track a unique cache-busting value for each image URL, and change it when necessary.
this._flaskImageUrlVersions = {} ;
}
render() {
@ -78,28 +94,29 @@ export class App extends React.Component
const menu = ( <Menu id="app">
<MenuButton />
<MenuList>
<MenuItem id="menu-show-publishers"
onSelect = { () => this.runSpecialSearch( "/search/publishers", null,
() => { this.setWindowTitle( "All publishers" ) }
) } > Show publishers </MenuItem>
<MenuItem id="menu-search-technique"
onSelect = { () => this.runSpecialSearch( "/search/tag/technique", {randomize:1},
() => { this.setWindowTitle( "Technique" ) }
) } > Show technique </MenuItem>
<MenuItem id="menu-search-tips"
onSelect = { () => this.runSpecialSearch( "/search/tag/tips", {randomize:1},
() => { this.setWindowTitle( "Tips" ) }
) } > Show tips </MenuItem>
<MenuItem id="menu-show-publishers" onSelect={ () => this._showPublishers(true) } >
<img src="/images/menu/publishers.png" alt="Show publishers." /> Show publishers
</MenuItem>
<MenuItem id="menu-search-technique" onSelect={ () => this._showTechniqueArticles(true) } >
<img src="/images/menu/technique.png" alt="Show technique articles." /> Show technique
</MenuItem>
<MenuItem id="menu-search-tips" onSelect={ () => this._showTipsArticles(true) } >
<img src="/images/menu/tips.png" alt="Show tip articles." /> Show tips
</MenuItem>
<div className="divider" />
<MenuItem id="menu-new-publisher"
onSelect = { () => PublisherSearchResult.onNewPublisher( this._onNewPublisher.bind(this) ) }
>New publisher</MenuItem>
<MenuItem id="menu-new-publication"
onSelect = { () => PublicationSearchResult.onNewPublication( this._onNewPublication.bind(this) ) }
>New publication</MenuItem>
<MenuItem id="menu-new-article"
onSelect = { () => ArticleSearchResult.onNewArticle( this._onNewArticle.bind(this) ) }
>New article</MenuItem>
<MenuItem id="menu-new-publisher" onSelect={PublisherSearchResult.onNewPublisher} >
<img src="/images/menu/publisher.png" alt="New publisher." /> New publisher
</MenuItem>
<MenuItem id="menu-new-publication" onSelect={PublicationSearchResult.onNewPublication} >
<img src="/images/menu/publication.png" alt="New publication." /> New publication
</MenuItem>
<MenuItem id="menu-new-article" onSelect={ArticleSearchResult.onNewArticle} >
<img src="/images/menu/article.png" alt="New article." /> New article
</MenuItem>
<div className="divider" />
<MenuItem id="menu-db-report" onSelect={ () => this._showDbReport(true) } >
<img src="/images/menu/db-report.png" alt="Database report." /> DB report
</MenuItem>
</MenuList>
</Menu> ) ;
// generate the main content
@ -112,10 +129,13 @@ export class App extends React.Component
<SearchForm onSearch={this.onSearch.bind(this)} ref={this._searchFormRef} />
</div>
{menu}
<SearchResults ref={this._searchResultsRef}
seqNo = {this.state.searchSeqNo}
searchResults = {this.state.searchResults}
/>
{ this.state.showDbReport
? <DbReport />
: <SearchResults ref={this._searchResultsRef}
seqNo = {this.state.searchSeqNo}
searchResults = {this.state.searchResults}
/>
}
</div> ) ;
}
return ( <div> {content}
@ -137,40 +157,29 @@ export class App extends React.Component
{ this._fakeUploads && <div>
<textarea id="_stored_msg-upload_" ref="_stored_msg-upload_" defaultValue="" hidden={true} />
</div> }
<div id="watermark" ref="watermark"> <img src="/images/watermark.png" alt="watermark" /> </div>
</div> ) ;
}
componentDidMount() {
// install our key handler
// initialize
PreviewableImage.initPreviewableImages() ;
window.addEventListener( "keydown", this.onKeyDown.bind( this ) ) ;
// check if the server started up OK
let on_startup_ok = () => {
// the backend server started up OK, continue our startup process
// initialize the caches
// NOTE: We maintain caches of key objects, so that we can quickly populate droplists. The backend server returns
// updated lists after any operation that could change them (create/update/delete), which is simpler and less error-prone
// than trying to manually keep our caches in sync. It's less efficient, but it won't happen too often, there won't be
// too many entries, and the database server is local.
this.caches = {} ;
[ "publishers", "publications", "authors", "scenarios", "tags" ].forEach( type => {
axios.get( this.makeFlaskUrl( "/" + type ) )
.then( resp => {
this.caches[ type ] = resp.data ;
this._onStartupTask( "caches." + type ) ;
} )
.catch( err => {
this.showErrorToast( <div> Couldn't load the {type}: <div className="monospace"> {err.toString()} </div> </div> ) ;
} ) ;
} ) ;
this._onStartupTask( "dummy" ) ;
}
let on_startup_failure = () => {
// the backend server had problems during startup; we hide the spinner
// and leave the error message(s) on-screen.
document.getElementById( "loading" ).style.display = "none" ;
}
axios.get( this.makeFlaskUrl( "/startup-messages" ) )
.then( resp => {
axios.get(
this.makeFlaskUrl( "/startup-messages" )
).then( resp => {
// show any messages logged by the backend server as it started up
[ "info", "warning", "error" ].forEach( msgType => {
if ( resp.data[ msgType ] ) {
@ -190,9 +199,12 @@ export class App extends React.Component
on_startup_failure() ;
else
on_startup_ok() ;
} )
.catch( err => {
this.showErrorToast( <div> Couldn't get the startup messages: <div className="monospace"> {err.toString()} </div> </div> ) ;
} ).catch( err => {
let errorMsg = err.toString() ;
if ( errorMsg.indexOf( "502" ) !== -1 || errorMsg.indexOf( "504" ) !== -1 )
this.showErrorToast( <div> Couldn't connect to the backend Flask server. </div> ) ;
else
this.showErrorToast( <div> Couldn't get the startup messages: <div className="monospace"> {errorMsg} </div> </div> ) ;
on_startup_failure() ;
} ) ;
}
@ -231,64 +243,102 @@ export class App extends React.Component
}
this._doSearch( "/search", { query: query } ) ;
}
runSpecialSearch( url, args, onDone ) {
// run the search
this._searchFormRef.current.setState( { queryString: "" } ) ;
if ( ! args )
args = {} ;
this._doSearch( url, args, onDone ) ;
}
_doSearch( url, args, onDone ) {
// do the search
this.setWindowTitle( null ) ;
this.setState( { searchResults: "(loading)" } ) ;
this.setState( { searchResults: "(loading)", showDbReport: false } ) ;
args.no_hilite = this._disableSearchResultHighlighting ;
axios.post(
this.makeFlaskUrl( url ), args
)
.then( resp => {
).then( resp => {
ReactDOM.findDOMNode( this._searchResultsRef.current ).scrollTo( 0, 0 ) ;
this.setState( { searchResults: resp.data, searchSeqNo: this.state.searchSeqNo+1 } ) ;
if ( onDone )
onDone() ;
} )
.catch( err => {
} ).catch( err => {
this.showErrorResponse( "The search query failed", err ) ;
this.setState( { searchResults: null, searchSeqNo: this.state.searchSeqNo+1 } ) ;
} ) ;
}
_onNewPublisher( publ_id, vals ) { this._addNewSearchResult( vals, "publisher", "publ_id", publ_id ) ; }
_onNewPublication( pub_id, vals ) { this._addNewSearchResult( vals, "publication", "pub_id", pub_id ) ; }
_onNewArticle( article_id, vals ) { this._addNewSearchResult( vals, "article", "article_id", article_id ) ; }
_addNewSearchResult( vals, srType, idName, idVal ) {
// add the new search result to the start of the search results
// NOTE: This isn't really the right thing to do, since the new object might not actually be
// a result for the current search, but it's nice to give the user some visual feedback.
vals.type = srType ;
vals[ idName ] = idVal ;
let newSearchResults = [ vals ] ;
runSpecialSearch( url, args, onDone ) {
// run the search
this._searchFormRef.current.setState( { queryString: "" } ) ;
if ( ! args )
args = {} ;
this._doSearch( url, args, onDone ) ;
}
_showPublishers( pushState ) {
this.runSpecialSearch( "/search/publishers", null, () => {
this.setWindowTitle( "All publishers" ) ;
if ( pushState )
window.history.pushState( null, document.title, "/publishers"+window.location.search ) ;
} )
}
_showTechniqueArticles( pushState ) {
this.runSpecialSearch( "/search/tag/technique", {randomize:1}, () => {
this.setWindowTitle( "Technique" ) ;
if ( pushState )
window.history.pushState( null, document.title, "/technique"+window.location.search ) ;
} )
}
_showTipsArticles( pushState ) {
this.runSpecialSearch( "/search/tag/tips", {randomize:1}, () => {
this.setWindowTitle( "Tips" ) ;
if ( pushState )
window.history.pushState( null, document.title, "/tips"+window.location.search ) ;
} )
}
_showDbReport( pushState ) {
this.setState( { showDbReport: true, searchResults: [] } ) ;
this._searchFormRef.current.setState( { queryString: "" } ) ;
this.setWindowTitle( "Database report" ) ;
if ( pushState )
window.history.pushState( null, document.title, "/report"+window.location.search ) ;
}
prependSearchResult( sr ) {
// add a new entry to the start of the search results
// NOTE: We do this after creating a new object, and while it isn't really the right thing
// to do (since the new object might not actually be a result for the current search), it's nice
// to give the user some visual feedback.
let newSearchResults = [ sr ] ;
newSearchResults.push( ...this.state.searchResults ) ;
this.setState( { searchResults: newSearchResults } ) ;
}
updatePublications( pubs ) {
// update the cache
let pubs2 = {} ;
for ( let i=0 ; i < pubs.length ; ++i ) {
const pub = pubs[ i ] ;
this.caches.publications[ pub.pub_id ] = pub ;
pubs2[ pub.pub_id ] = pub ;
}
// update the UI
updatePublisher( publ_id ) {
// update the specified publisher in the UI
this._doUpdateSearchResult(
(sr) => ( sr._type === "publisher" && sr.publ_id === publ_id ),
this.makeFlaskUrl( "/publisher/" + publ_id, {include_pubs:1,include_articles:1} )
) ;
this.forceFlaskImageReload( "publisher", publ_id ) ;
}
updatePublication( pub_id ) {
// update the specified publication in the UI
this._doUpdateSearchResult(
(sr) => ( sr._type === "publication" && sr.pub_id === pub_id ),
this.makeFlaskUrl( "/publication/" + pub_id, {include_articles:1,deep:1} )
) ;
this.forceFlaskImageReload( "publication", pub_id ) ;
}
_doUpdateSearchResult( srCheck, url ) {
// find the target search result in the UI
let newSearchResults = this.state.searchResults ;
for ( let i=0 ; i < newSearchResults.length ; ++i ) {
if ( newSearchResults[i].type === "publication" && pubs2[ newSearchResults[i].pub_id ] ) {
newSearchResults[i] = pubs2[ newSearchResults[i].pub_id ] ;
newSearchResults[i].type = "publication" ;
if ( srCheck( newSearchResults[i] ) ) {
// found it - get the latest details from the backend
axios.get( url ).then( resp => {
newSearchResults[i] = resp.data ;
this.setState( { searchResults: newSearchResults } ) ;
} ).catch( err => {
this.showErrorResponse( "Can't get the updated search result details", err ) ;
} ) ;
break ; // nb: we assume there's only 1 instance
}
}
this.setState( { searchResults: newSearchResults } ) ;
}
showModalForm( formId, title, titleColor, content, buttons ) {
@ -421,18 +471,6 @@ export class App extends React.Component
console.log( " " + detail ) ;
}
makeTagLists( tags ) {
// convert the tags into a list suitable for CreatableSelect
// NOTE: react-select uses the "value" field to determine which choices have already been selected
// and thus should not be shown in the droplist of available choices.
let tagList = [] ;
if ( tags )
tags.map( tag => tagList.push( { value: tag, label: tag } ) ) ;
// create another list for all known tags
let allTags = this.caches.tags.map( tag => { return { value: tag[0], label: tag[0] } } ) ;
return [ tagList, allTags ] ;
}
makeAppUrl( url ) {
// FUDGE! The test suite needs any URL parameters to be passed on to the next page if a link is clicked.
if ( this.isTestMode() )
@ -451,21 +489,42 @@ export class App extends React.Component
}
return url ;
}
makeFlaskImageUrl( type, imageId, force ) {
makeExternalDocUrl( url ) {
// generate a URL for an external document
if ( isLink( url ) )
return url ;
if ( url.substr( 0, 2 ) === "$/" )
url = url.substr( 2 ) ;
if ( url[0] === "/" )
url = url.substr( 1 ) ;
return this.makeFlaskUrl( "/docs/" + encodeURIComponent( url ) ) ;
}
makeFlaskImageUrl( type, imageId ) {
// generate an image URL for the Flask backend server
if ( ! imageId )
return null ;
let url = this.makeFlaskUrl( "/images/" + type + "/" + imageId ) ;
if ( force )
url += "?foo=" + Math.random() ; // FUDGE! To bypass the cache :-/
const key = this._makeFlaskImageKey( type, imageId ) ;
if ( ! this._flaskImageUrlVersions[ key ] ) {
// NOTE: It would be nice to add a version only when it's actually needed (i.e. the user
// has changed the image, forcing us to fetch the new one), but that causes problems
// in a dev environment, since we are constantly changing things in the database
// outside the app (e.g. in tests), and the browser cache would get out of sync.
this.forceFlaskImageReload( type, imageId ) ;
}
url += "?v=" + this._flaskImageUrlVersions[key] ;
return url ;
}
makeExternalDocUrl( url ) {
// generate a URL for an external document
if ( url.substr( 0, 2 ) === "$/" )
url = url.substr( 2 ) ;
return this.makeFlaskUrl( "/docs/" + encodeURIComponent(url) ) ;
forceFlaskImageReload( type, imageId ) {
// bump the image's version#, which will force a new URL the next time makeFlaskImageUrl() is called
const key = this._makeFlaskImageKey( type, imageId ) ;
const version = this._flaskImageUrlVersions[ key ] ;
// NOTE: It would be nice to start at 1, but this causes problems in a dev environment, since
// we are constantly changing things in the database, and the browser cache will get out of sync.
this._flaskImageUrlVersions[ key ] = version ? version+1 : Math.floor(Date.now()/1000) ;
}
_makeFlaskImageKey( type, imageId ) { return type + ":" + imageId ; }
_onStartupTask( taskId ) {
// flag that the specified startup task has completed
@ -481,15 +540,22 @@ export class App extends React.Component
}
_onStartupComplete() {
// startup has completed, we're ready to go
this.refs.watermark.style.opacity = 0.2 ;
if ( this.props.warning )
this.showWarningToast( this.props.warning ) ;
if ( this.props.doSearch )
this.props.doSearch() ;
else if ( this.props.type === "report" )
this._showDbReport() ;
// NOTE: We could preload the DataCache here (i.e. where it won't affect startup time),
// but it will happen on every page load (e.g. /article/NNN or /publication/NNN),
// which would probably hurt more than it helps (since the data isn't needed if the user
// is only searching for stuff i.e. most of the time).
}
setWindowTitleFromSearchResults( srType, idField, idVal, nameField ) {
for ( let sr of Object.entries( this.state.searchResults ) ) {
if ( sr[1].type === srType && String(sr[1][idField]) === idVal ) {
if ( sr[1]._type === srType && String(sr[1][idField]) === idVal ) {
this.setWindowTitle( typeof nameField === "function" ? nameField(sr[1]) : sr[1][nameField] ) ;
return ;
}
@ -497,6 +563,10 @@ export class App extends React.Component
this.setWindowTitle( null ) ;
}
setWindowTitle( caption ) {
if ( caption ) {
let doc = new DOMParser().parseFromString( caption, "text/html" ) ;
caption = doc.body.textContent ;
}
document.title = caption ? APP_NAME + " - " + caption : APP_NAME ;
}

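// A standalone sketch (names hypothetical) of the per-image cache-busting scheme
// described in the notes above: each image URL carries a version value that changes
// only when we know the image has changed, so the browser cache stays valid the
// rest of the time.
const imageUrlVersions = {} ;
function makeImageUrl( type, imageId ) {
    const key = type + ":" + imageId ;
    if ( ! imageUrlVersions[ key ] )
        imageUrlVersions[ key ] = Math.floor( Date.now() / 1000 ) ;
    return "/images/" + type + "/" + imageId + "?v=" + imageUrlVersions[ key ] ;
}
function forceImageReload( type, imageId ) {
    const key = type + ":" + imageId ;
    const version = imageUrlVersions[ key ] ;
    imageUrlVersions[ key ] = version ? version+1 : Math.floor( Date.now() / 1000 ) ;
}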
@ -1,5 +1,6 @@
#article-form .row label.top { width: 6.5em ; }
#article-form .row label { width: 5.75em ; }
#article-form .row label.parent-mode { cursor: pointer ; }
#article-form .row.snippet { flex-direction: column ; align-items: initial ; margin-top: -0.5em ; }
#article-form .row.snippet textarea { min-height: 6em ; }

@ -3,9 +3,12 @@ import { Link } from "react-router-dom" ;
import { Menu, MenuList, MenuButton, MenuItem } from "@reach/menu-button" ;
import { ArticleSearchResult2 } from "./ArticleSearchResult2.js" ;
import "./ArticleSearchResult.css" ;
import { PublisherSearchResult } from "./PublisherSearchResult.js" ;
import { PublicationSearchResult } from "./PublicationSearchResult.js" ;
import { PreviewableImage } from "./PreviewableImage.js" ;
import { RatingStars } from "./RatingStars.js" ;
import { gAppRef } from "./App.js" ;
import { makeScenarioDisplayName, applyUpdatedVals, removeSpecialFields, makeCommaList, isLink } from "./utils.js" ;
import { makeScenarioDisplayName, updateRecord, makeCommaList } from "./utils.js" ;
const axios = require( "axios" ) ;
@ -19,59 +22,44 @@ export class ArticleSearchResult extends React.Component
// prepare the basic details
const display_title = this.props.data[ "article_title!" ] || this.props.data.article_title ;
const display_subtitle = this.props.data[ "article_subtitle!" ] || this.props.data.article_subtitle ;
const display_snippet = this.props.data[ "article_snippet!" ] || this.props.data.article_snippet ;
const pub = gAppRef.caches.publications[ this.props.data.pub_id ] ;
const image_url = gAppRef.makeFlaskImageUrl( "article", this.props.data.article_image_id, true ) ;
const display_snippet = PreviewableImage.adjustHtmlForPreviewableImages(
this.props.data[ "article_snippet!" ] || this.props.data.article_snippet
) ;
const parent_pub = this.props.data._parent_pub ;
const parent_publ = this.props.data._parent_publ ;
const image_url = gAppRef.makeFlaskImageUrl( "article", this.props.data.article_image_id ) ;
// prepare the article's URL
let article_url = this.props.data.article_url ;
if ( article_url ) {
if ( ! isLink( article_url ) )
article_url = gAppRef.makeExternalDocUrl( article_url ) ;
} else if ( pub && pub.pub_url ) {
article_url = gAppRef.makeExternalDocUrl( pub.pub_url ) ;
if ( article_url )
article_url = gAppRef.makeExternalDocUrl( article_url ) ;
else if ( parent_pub && parent_pub.pub_url ) {
article_url = gAppRef.makeExternalDocUrl( parent_pub.pub_url ) ;
if ( article_url.substr( article_url.length-4 ) === ".pdf" && this.props.data.article_pageno )
article_url += "#page=" + this.props.data.article_pageno ;
}
// prepare the authors
let authors = [] ;
if ( this.props.data[ "authors!" ] ) {
// the backend has provided us with a list of author names (possibly highlighted) - use them directly
for ( let i=0 ; i < this.props.data["authors!"].length ; ++i ) {
const author_id = this.props.data.article_authors[ i ] ;
authors.push( <Link key={i} className="author" title="Show articles from this author."
to = { gAppRef.makeAppUrl( "/author/" + author_id ) }
dangerouslySetInnerHTML = {{ __html: this.props.data["authors!"][i] }}
/> ) ;
}
} else {
// we only have a list of author ID's (the normal case) - figure out what the corresponding names are
for ( let i=0 ; i < this.props.data.article_authors.length ; ++i ) {
const author_id = this.props.data.article_authors[ i ] ;
authors.push( <Link key={i} className="author" title="Show articles from this author."
to = { gAppRef.makeAppUrl( "/author/" + author_id ) }
dangerouslySetInnerHTML = {{ __html: gAppRef.caches.authors[ author_id ].author_name }}
/> ) ;
}
const author_names_hilite = this.props.data[ "authors!" ] ;
for ( let i=0 ; i < this.props.data.article_authors.length ; ++i ) {
const author = this.props.data.article_authors[ i ] ;
const author_name = author_names_hilite ? author_names_hilite[i] : author.author_name ;
authors.push( <Link key={i} className="author" title="Show articles from this author."
to = { gAppRef.makeAppUrl( "/author/" + author.author_id ) }
dangerouslySetInnerHTML = {{ __html: author_name }}
/> ) ;
}
// prepare the scenarios
let scenarios = [] ;
if ( this.props.data[ "scenarios!" ] ) {
// the backend has provided us with a list of scenarios (possibly highlighted) - use them directly
this.props.data[ "scenarios!" ].forEach( (scenario,i) =>
scenarios.push( <span key={i} className="scenario"
dangerouslySetInnerHTML = {{ __html: makeScenarioDisplayName( scenario ) }}
/> )
) ;
} else {
// we only have a list of scenario ID's (the normal case) - figure out what the corresponding names are
this.props.data.article_scenarios.forEach( (scenario,i) =>
scenarios.push( <span key={i} className="scenario"
dangerouslySetInnerHTML = {{ __html: makeScenarioDisplayName( gAppRef.caches.scenarios[scenario] ) }}
/> )
) ;
const scenario_names_hilite = this.props.data[ "scenarios!" ] ;
for ( let i=0 ; i < this.props.data.article_scenarios.length ; ++i ) {
const scenario = this.props.data.article_scenarios[ i ] ;
const scenario_display_name = scenario_names_hilite ? scenario_names_hilite[i] : makeScenarioDisplayName(scenario) ;
scenarios.push( <span key={i} className="scenario"
dangerouslySetInnerHTML = {{ __html: scenario_display_name }}
/> ) ;
}
// prepare the tags
@ -102,18 +90,19 @@ export class ArticleSearchResult extends React.Component
const menu = ( <Menu>
<MenuButton className="sr-menu" />
<MenuList>
<MenuItem className="edit"
onSelect = { this.onEditArticle.bind( this ) }
>Edit</MenuItem>
<MenuItem className="delete"
onSelect = { this.onDeleteArticle.bind( this ) }
>Delete</MenuItem>
<MenuItem className="edit" onSelect={ () => this.onEditArticle() } >
<img src="/images/edit.png" alt="Edit." /> Edit
</MenuItem>
<MenuItem className="delete" onSelect={ () => this.onDeleteArticle() } >
<img src="/images/delete.png" alt="Delete." /> Delete
</MenuItem>
</MenuList>
</Menu> ) ;
// NOTE: The "title" field is also given the CSS class "name" so that the normal CSS will apply to it.
// Some tests also look for a generic ".name" class name when checking search results.
const pub_display_name = pub ? PublicationSearchResult.makeDisplayName( pub ) : null ;
const pub_display_name = parent_pub ? PublicationSearchResult.makeDisplayName( parent_pub ) : null ;
const publ_display_name = parent_publ ? PublisherSearchResult.makeDisplayName( parent_publ ) : null ;
return ( <div className="search-result article"
ref = { r => gAppRef.setTestAttribute( r, "article_id", this.props.data.article_id ) }
>
@ -125,7 +114,19 @@ export class ArticleSearchResult extends React.Component
dangerouslySetInnerHTML = {{ __html: pub_display_name }}
/>
}
<span className="title name" dangerouslySetInnerHTML={{ __html: display_title }} />
{ publ_display_name &&
<Link className="publisher" title="Show this publisher."
to = { gAppRef.makeAppUrl( "/publisher/" + this.props.data.publ_id ) }
dangerouslySetInnerHTML = {{ __html: publ_display_name }}
/>
}
<RatingStars rating={this.props.data.article_rating} title="Rate this article."
onChange = { this.onRatingChange.bind( this ) }
/>
<Link className="title name" title="Show this article."
to = { gAppRef.makeAppUrl( "/article/" + this.props.data.article_id ) }
dangerouslySetInnerHTML = {{ __html: display_title }}
/>
{ article_url &&
<a href={article_url} className="open-link" target="_blank" rel="noopener noreferrer">
<img src="/images/open-link.png" alt="Open article." title="Open this article." />
@ -134,69 +135,106 @@ export class ArticleSearchResult extends React.Component
{ display_subtitle && <div className="subtitle" dangerouslySetInnerHTML={{ __html: display_subtitle }} /> }
</div>
<div className="content">
{ image_url && <img src={image_url} className="image" alt="Article." /> }
{ image_url && <PreviewableImage url={image_url} manualActivate={true} className="image" altText="Article." /> }
<div className="snippet" dangerouslySetInnerHTML={{__html: display_snippet}} />
</div>
<div className="footer">
{ authors.length > 0 && <div className="authors"> By {makeCommaList(authors)} </div> }
{ scenarios.length > 0 && <div className="scenarios"> Scenarios: {makeCommaList(scenarios)} </div> }
{ tags.length > 0 && <div className="tags"> Tags: {tags} </div> }
{ authors.length > 0 &&
<div className="authors"> By {makeCommaList(authors)} </div>
}
{ this.props.data.article_date &&
<div> <label>Published:</label> <span className="article_date"> {this.props.data.article_date} </span> </div>
}
{ scenarios.length > 0 &&
<div className="scenarios"> Scenarios: {makeCommaList(scenarios)} </div>
}
{ tags.length > 0 &&
<div className="tags"> Tags: {tags} </div>
}
</div>
</div> ) ;
}
static onNewArticle( notify ) {
ArticleSearchResult2._doEditArticle( {}, (newVals,refs) => {
axios.post( gAppRef.makeFlaskUrl( "/article/create", {list:1} ), newVals )
.then( resp => {
// update the caches
gAppRef.caches.authors = resp.data.authors ;
gAppRef.caches.scenarios = resp.data.scenarios ;
gAppRef.caches.tags = resp.data.tags ;
// unload any updated values
applyUpdatedVals( newVals, newVals, resp.data.updated, refs ) ;
// update the UI with the new details
notify( resp.data.article_id, newVals ) ;
if ( resp.data.warnings )
gAppRef.showWarnings( "The new article was created OK.", resp.data.warnings ) ;
else
gAppRef.showInfoToast( <div> The new article was created OK. </div> ) ;
if ( resp.data._publication )
gAppRef.updatePublications( [ resp.data._publication ] ) ;
gAppRef.closeModalForm() ;
} )
.catch( err => {
gAppRef.showErrorMsg( <div> Couldn't create the article: <div className="monospace"> {err.toString()} </div> </div> ) ;
componentDidMount() {
PreviewableImage.activatePreviewableImages( this ) ;
}
onRatingChange( newRating, onFailed ) {
axios.post( gAppRef.makeFlaskUrl( "/article/update-rating", null ), {
article_id: this.props.data.article_id,
rating: newRating,
} ).catch( err => {
gAppRef.showErrorMsg( <div> Couldn't update the rating: <div className="monospace"> {err.toString()} </div> </div> ) ;
if ( onFailed )
onFailed() ;
} ) ;
}
static onNewArticle() {
gAppRef.dataCache.get( [ "publishers", "publications", "authors", "scenarios", "tags" ], () => {
ArticleSearchResult2._doEditArticle( {}, (newVals,refs) => {
axios.post(
gAppRef.makeFlaskUrl( "/article/create" ), newVals
).then( resp => {
gAppRef.dataCache.refresh( [ "authors", "scenarios", "tags" ] ) ;
// update the UI
const newArticle = resp.data.record ;
gAppRef.prependSearchResult( newArticle ) ;
if ( newArticle._parent_pub )
gAppRef.updatePublication( newArticle._parent_pub.pub_id ) ;
else if ( newArticle._parent_publ )
gAppRef.updatePublisher( newArticle._parent_publ.publ_id ) ;
// report the result
if ( resp.data.warnings )
gAppRef.showWarnings( "The new article was created OK.", resp.data.warnings ) ;
else
gAppRef.showInfoToast( <div> The new article was created OK. </div> ) ;
gAppRef.closeModalForm() ;
} ).catch( err => {
gAppRef.showErrorMsg( <div> Couldn't create the article: <div className="monospace"> {err.toString()} </div> </div> ) ;
} ) ;
} ) ;
} ) ;
}
onEditArticle() {
ArticleSearchResult2._doEditArticle( this.props.data, (newVals,refs) => {
// send the updated details to the server
newVals.article_id = this.props.data.article_id ;
axios.post( gAppRef.makeFlaskUrl( "/article/update", {list:1} ), newVals )
.then( resp => {
// update the caches
gAppRef.caches.authors = resp.data.authors ;
gAppRef.caches.scenarios = resp.data.scenarios ;
gAppRef.caches.tags = resp.data.tags ;
// update the UI with the new details
applyUpdatedVals( this.props.data, newVals, resp.data.updated, refs ) ;
removeSpecialFields( this.props.data ) ;
this.forceUpdate() ;
if ( resp.data.warnings )
gAppRef.showWarnings( "The article was updated OK.", resp.data.warnings ) ;
else
gAppRef.showInfoToast( <div> The article was updated OK. </div> ) ;
if ( resp.data._publications )
gAppRef.updatePublications( resp.data._publications ) ;
gAppRef.closeModalForm() ;
} )
.catch( err => {
gAppRef.showErrorMsg( <div> Couldn't update the article: <div className="monospace"> {err.toString()} </div> </div> ) ;
} ) ;
} );
gAppRef.dataCache.get( [ "publishers", "publications", "authors", "scenarios", "tags" ], () => {
ArticleSearchResult2._doEditArticle( this.props.data, (newVals,refs) => {
// send the updated details to the server
newVals.article_id = this.props.data.article_id ;
axios.post(
gAppRef.makeFlaskUrl( "/article/update" ), newVals
).then( resp => {
gAppRef.dataCache.refresh( [ "authors", "scenarios", "tags" ] ) ;
// update the UI
const article = resp.data.record ;
const orig_parent_pub = this.props.data._parent_pub ;
const orig_parent_publ = this.props.data._parent_publ ;
updateRecord( this.props.data, article ) ;
if ( article._parent_pub )
gAppRef.updatePublication( article._parent_pub.pub_id ) ;
else if ( article._parent_publ )
gAppRef.updatePublisher( article._parent_publ.publ_id ) ;
if ( orig_parent_pub )
gAppRef.updatePublication( orig_parent_pub.pub_id ) ;
if ( orig_parent_publ )
gAppRef.updatePublisher( orig_parent_publ.publ_id ) ;
// refresh the image and re-render, if the user uploaded a new image
if ( newVals.imageData )
gAppRef.forceFlaskImageReload( "article", newVals.article_id ) ;
this.forceUpdate() ;
PreviewableImage.activatePreviewableImages( this ) ;
// report the result
if ( resp.data.warnings )
gAppRef.showWarnings( "The article was updated OK.", resp.data.warnings ) ;
else
gAppRef.showInfoToast( <div> The article was updated OK. </div> ) ;
gAppRef.closeModalForm() ;
} ).catch( err => {
gAppRef.showErrorMsg( <div> Couldn't update the article: <div className="monospace"> {err.toString()} </div> </div> ) ;
} ) ;
} );
} ) ;
}
onDeleteArticle() {
@ -208,21 +246,22 @@ export class ArticleSearchResult extends React.Component
gAppRef.ask( content, "ask", {
"OK": () => {
// delete the article on the server
axios.get( gAppRef.makeFlaskUrl( "/article/delete/" + this.props.data.article_id, {list:1} ) )
.then( resp => {
// update the caches
gAppRef.caches.authors = resp.data.authors ;
gAppRef.caches.tags = resp.data.tags ;
axios.get(
gAppRef.makeFlaskUrl( "/article/delete/" + this.props.data.article_id )
).then( resp => {
gAppRef.dataCache.refresh( [ "authors", "tags" ] ) ;
// update the UI
this.props.onDelete( "article_id", this.props.data.article_id ) ;
if ( this.props.data._parent_pub )
gAppRef.updatePublication( this.props.data._parent_pub.pub_id ) ;
else if ( this.props.data._parent_publ )
gAppRef.updatePublisher( this.props.data._parent_publ.publ_id ) ;
// report the result
if ( resp.data.warnings )
gAppRef.showWarnings( "The article was deleted.", resp.data.warnings ) ;
else
gAppRef.showInfoToast( <div> The article was deleted. </div> ) ;
if ( resp.data._publication )
gAppRef.updatePublications( [ resp.data._publication ] ) ;
} )
.catch( err => {
} ).catch( err => {
gAppRef.showErrorToast( <div> Couldn't delete the article: <div className="monospace"> {err.toString()} </div> </div> ) ;
} ) ;
},

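// A small illustration (values hypothetical) of the "#page=" handling above: when an
// article's parent publication is a PDF, the article's page number is appended as a
// URL fragment so the browser opens the document at the right page.
let article_url = "/api/docs/some-magazine.pdf" ;
const article_pageno = 17 ;
if ( article_url.substr( article_url.length-4 ) === ".pdf" && article_pageno )
    article_url += "#page=" + article_pageno ;
// => "/api/docs/some-magazine.pdf#page=17"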
@ -5,7 +5,7 @@ import { NEW_ARTICLE_PUB_PRIORITY_CUTOFF } from "./constants.js" ;
import { PublicationSearchResult } from "./PublicationSearchResult.js" ;
import { gAppRef } from "./App.js" ;
import { ImageFileUploader } from "./FileUploader.js" ;
import { makeScenarioDisplayName, parseScenarioDisplayName, checkConstraints, confirmDiscardChanges, sortSelectableOptions, unloadCreatableSelect, isNumeric } from "./utils.js" ;
import { makeScenarioDisplayName, parseScenarioDisplayName, checkConstraints, confirmDiscardChanges, sortSelectableOptions, unloadCreatableSelect, makeTagLists, isNumeric } from "./utils.js" ;
// --------------------------------------------------------------------
@ -18,6 +18,26 @@ export class ArticleSearchResult2
let refs = {} ;
const isNew = Object.keys( vals ).length === 0 ;
// set the parent mode
let parentMode = vals.publ_id ? "publisher" : "publication" ;
let publicationParentRowRef = null ;
let publisherParentRowRef = null ;
let articleDateRef = null ;
function onPublicationParent() {
parentMode = "publication" ;
publicationParentRowRef.style.display = "flex" ;
publisherParentRowRef.style.display = "none" ;
articleDateRef.style.display = "none" ;
refs.pub_id.focus() ;
}
function onPublisherParent() {
parentMode = "publisher" ;
publicationParentRowRef.style.display = "none" ;
publisherParentRowRef.style.display = "flex" ;
articleDateRef.style.display = "flex" ;
refs.publ_id.focus() ;
}
// prepare to save the initial values
let initialVals = null ;
function onReady() {
@ -28,8 +48,7 @@ export class ArticleSearchResult2
// initialize the image
let imageFilename=null, imageData=null ;
let imageRef=null, uploadImageRef=null, removeImageRef=null ;
let imageUrl = gAppRef.makeFlaskUrl( "/images/article/" + vals.article_id ) ;
imageUrl += "?foo=" + Math.random() ; // FUDGE! To bypass the cache :-/
let imageUrl = gAppRef.makeFlaskImageUrl( "article", vals.article_id ) || "/force-404" ;
function onImageLoaded() { onReady() ; }
function onMissingImage() {
imageRef.src = "/images/placeholder.png" ;
@ -55,13 +74,14 @@ export class ArticleSearchResult2
} ;
// initialize the publications
let publications = [ { value: null, label: <i>(none)</i> } ] ;
let publications = [ { value: null, label: <i>(none)</i>, textLabel: "" } ] ;
let mostRecentPub = null ;
for ( let p of Object.entries(gAppRef.caches.publications) ) {
for ( let p of Object.entries( gAppRef.dataCache.data.publications ) ) {
const pub_display_name = PublicationSearchResult.makeDisplayName( p[1] ) ;
const pub = {
value: p[1].pub_id,
label: <span dangerouslySetInnerHTML={{__html: pub_display_name}} />,
textLabel: pub_display_name,
} ;
publications.push( pub ) ;
if ( mostRecentPub === null || p[1].time_created > mostRecentPub[1] )
@ -87,27 +107,41 @@ export class ArticleSearchResult2
}
}
// initialize the publishers
let publishers = [ { value: null, label: <i>(none)</i>, textLabel: "" } ] ;
let currPubl = publishers[0] ;
for ( let p of Object.entries( gAppRef.dataCache.data.publishers ) ) {
publishers.push( {
value: p[1].publ_id,
label: <span dangerouslySetInnerHTML={{__html: p[1].publ_name}} />,
textLabel: p[1].publ_name,
} ) ;
if ( p[1].publ_id === vals.publ_id )
currPubl = publishers[ publishers.length-1 ] ;
}
sortSelectableOptions( publishers ) ;
// initialize the authors
let allAuthors = [] ;
for ( let a of Object.entries(gAppRef.caches.authors) )
for ( let a of Object.entries( gAppRef.dataCache.data.authors ) )
allAuthors.push( { value: a[1].author_id, label: a[1].author_name } );
allAuthors.sort( (lhs,rhs) => { return lhs.label.localeCompare( rhs.label ) ; } ) ;
let currAuthors = [] ;
if ( vals.article_authors ) {
currAuthors = vals.article_authors.map( a => {
return { value: a, label: gAppRef.caches.authors[a].author_name }
return { value: a.author_id, label: a.author_name }
} ) ;
}
// initialize the scenarios
let allScenarios = [] ;
for ( let s of Object.entries(gAppRef.caches.scenarios) )
for ( let s of Object.entries( gAppRef.dataCache.data.scenarios ) )
allScenarios.push( { value: s[1].scenario_id, label: makeScenarioDisplayName(s[1]) } ) ;
allScenarios.sort( (lhs,rhs) => { return lhs.label.localeCompare( rhs.label ) ; } ) ;
let currScenarios = [] ;
if ( vals.article_scenarios ) {
currScenarios = vals.article_scenarios.map( s => {
return { value: s, label: makeScenarioDisplayName(gAppRef.caches.scenarios[s]) }
return { value: s.scenario_id, label: makeScenarioDisplayName(s) }
} ) ;
}
function onScenarioCreated( val ) {
@ -120,11 +154,12 @@ export class ArticleSearchResult2
}
// initialize the tags
const tags = gAppRef.makeTagLists( vals.article_tags ) ;
const tags = makeTagLists( vals.article_tags ) ;
// prepare the form content
/* eslint-disable jsx-a11y/img-redundant-alt */
const content = <div>
<div style={{display:"flex"}}>
<div className="image-container">
<div className="row image">
<img src={imageUrl} className="image"
@ -145,19 +180,40 @@ export class ArticleSearchResult2
/>
</div>
</div>
<div className="row title"> <label className="top"> Title: </label>
<input type="text" defaultValue={vals.article_title} autoFocus ref={r => refs.article_title=r} />
</div>
<div className="row subtitle"> <label className="top"> Subtitle: </label>
<input type="text" defaultValue={vals.article_subtitle} ref={r => refs.article_subtitle=r} />
</div>
<div className="row publication"> <label className="select top"> Publication: </label>
<Select className="react-select" classNamePrefix="react-select" options={publications} isSearchable={true}
defaultValue = {currPub}
ref = { r => refs.pub_id=r }
/>
<input className="pageno" type="text" defaultValue={vals.article_pageno} ref={r => refs.article_pageno=r} title="Page number." />
<div style={{flexGrow:1}}>
<div className="row title"> <label className="top"> Title: </label>
<input type="text" defaultValue={vals.article_title} autoFocus ref={r => refs.article_title=r} />
</div>
<div className="row subtitle"> <label className="top"> Subtitle: </label>
<input type="text" defaultValue={vals.article_subtitle} ref={r => refs.article_subtitle=r} />
</div>
<div className="row publication" style={{display:parentMode==="publication"?"flex":"none"}} ref={r => publicationParentRowRef=r} >
<label className="select top parent-mode"
title = "Click to associate this article with a publisher."
onClick = {onPublisherParent}
> Publication: </label>
<Select className="react-select" classNamePrefix="react-select" options={publications} isSearchable={true} getOptionValue={o => o.textLabel}
defaultValue = {currPub}
ref = { r => refs.pub_id=r }
/>
<input className="pageno" type="text" defaultValue={vals.article_pageno} ref={r => refs.article_pageno=r} title="Page number." />
</div>
<div className="row publisher" style={{display:parentMode==="publisher"?"flex":"none"}} ref={r => publisherParentRowRef=r} >
<label className="select top parent-mode"
title="Click to associate this article with a publication."
onClick = {onPublicationParent}
> Publisher: </label>
<Select className="react-select" classNamePrefix="react-select" options={publishers} isSearchable={true} getOptionValue={o => o.textLabel}
defaultValue = {currPubl}
ref = { r => refs.publ_id=r }
/>
</div>
<div className="row article_date" style={{display:parentMode==="publisher"?"flex":"none"}}ref={r => articleDateRef=r} >
<label className="select top"> Date: </label>
<input className="article_date" type="text" defaultValue={vals.article_date} ref={r => refs.article_date=r} />
</div>
</div>
</div>
<div className="row snippet"> <label> Snippet: </label>
<textarea defaultValue={vals.article_snippet} ref={r => refs.article_snippet=r} />
</div>
@ -191,9 +247,13 @@ export class ArticleSearchResult2
function unloadVals() {
let newVals = {} ;
for ( let r in refs ) {
if ( r === "pub_id" )
newVals[ r ] = refs[r].state.value && refs[r].state.value.value ;
else if ( r === "article_authors" ) {
if ( r === "pub_id" ) {
if ( parentMode === "publication" )
newVals[ r ] = refs[r].state.value && refs[r].state.value.value ;
} else if ( r === "publ_id" ) {
if ( parentMode === "publisher" )
newVals[ r ] = refs[r].state.value && refs[r].state.value.value ;
} else if ( r === "article_authors" ) {
let vals = unloadCreatableSelect( refs[r] ) ;
newVals.article_authors = [] ;
vals.forEach( v => {
@ -213,7 +273,7 @@ export class ArticleSearchResult2
} ) ;
} else if ( r === "article_tags" ) {
let vals = unloadCreatableSelect( refs[r] ) ;
newVals[ r ] = vals.map( v => v.label ) ;
} else
newVals[ r ] = refs[r].value.trim() ;
}
@ -234,10 +294,12 @@ export class ArticleSearchResult2
[ () => newVals.article_title === "", "Please give it a title.", refs.article_title ],
] ;
const optional = [
[ () => newVals.pub_id === null, "No publication was specified.", refs.pub_id ],
[ () => newVals.article_pageno === "" && newVals.pub_id !== null, "No page number was specified.", refs.article_pageno ],
[ () => parentMode === "publication" && newVals.pub_id === null, "No publication was specified.", refs.pub_id ],
[ () => parentMode === "publisher" && newVals.publ_id === null, "No publisher was specified.", refs.pub_id ],
[ () => parentMode === "publication" && newVals.article_pageno === "" && newVals.pub_id !== null, "No page number was specified.", refs.article_pageno ],
[ () => newVals.article_pageno !== "" && newVals.pub_id === null, "A page number was specified but no publication.", refs.pub_id ],
[ () => newVals.article_pageno !== "" && !isNumeric(newVals.article_pageno), "The page number is not numeric.", refs.article_pageno ],
[ () => newVals.publ_id && newVals.article_date === "", "The article date was not specified.", refs.article_date ],
[ () => newVals.article_snippet === "", "No snippet was provided.", refs.article_snippet ],
[ () => newVals.article_authors.length === 0, "No authors were specified.", refs.article_authors ],
[ () => newVals.article_tags && newVals.article_tags.length === 1 && newVals.article_tags[0] === "tips", "This tip has no other tags." ],
@ -264,8 +326,12 @@ export class ArticleSearchResult2
} ;
// show the form
const title = ( <div style={{display:"flex"}}>
<img src="/images/menu/article.png" alt="Dialog icon." />
{isNew ? "New article" : "Edit article"}
</div> ) ;
gAppRef.showModalForm( "article-form",
isNew ? "New article" : "Edit article", "#d3edfc",
title, "#d3edfc",
content, buttons
) ;
}

@ -0,0 +1,59 @@
import React from "react" ;
import { gAppRef } from "./App.js" ;
const axios = require( "axios" ) ;
// --------------------------------------------------------------------
export class DataCache
{
constructor() {
// initialize
this.data = {} ;
}
get( keys, onOK ) {
// initialize
if ( onOK === undefined )
onOK = () => {} ;
let nOK = 0 ;
function onPartialOK() {
if ( ++nOK === keys.length ) {
onOK() ;
}
}
// refresh each key
for ( let key of keys ) {
// check if we already have the data in the cache
if ( this.data[ key ] !== undefined ) {
onPartialOK() ;
} else {
// nope - get the specified data from the backend
axios.get(
gAppRef.makeFlaskUrl( "/" + key )
).then( resp => {
// got it - update the cache
this.data[ key ] = resp.data ;
onPartialOK() ;
} ).catch( err => {
gAppRef.showErrorToast(
<div> Couldn't load the {key}: <div className="monospace"> {err.toString()} </div> </div>
) ;
} ) ;
}
}
}
refresh( keys, onOK ) {
// refresh the specified keys
for ( let key of keys )
delete this.data[ key ] ;
this.get( keys, onOK ) ;
}
}

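// Example usage (assuming gAppRef has been initialized): make sure several cached
// lists are loaded, then use them; later calls with the same keys return immediately
// from the cache.
gAppRef.dataCache.get( [ "authors", "tags" ], () => {
    console.log( "Authors:", gAppRef.dataCache.data.authors ) ;
    console.log( "Tags:", gAppRef.dataCache.data.tags ) ;
} ) ;
// after something changes on the backend, invalidate and reload:
gAppRef.dataCache.refresh( [ "authors", "tags" ] ) ;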
@ -0,0 +1,24 @@
#db-report {
border: 1px solid #ccc ; border-radius: 8px ;
padding: 0.5em ;
}
#db-report .section { margin-top: 1em ; }
#db-report .section:first-of-type { margin-top: 0 ; }
#db-report h2 { border: 1px solid #ccc ; padding: 0.1em 0.2em ; background: #eee ; margin-bottom: 0.25em ; font-size: 125% ; }
#db-report h2 img.loading { height: 0.75em ; margin-left: 0.25em ; }
#db-report .db-row-counts .images { font-size: 80% ; font-style: italic ; }
#db-report .db-links .check-links-frame { display: inline-block ; position: absolute ; right: 1em ; text-align: center ; }
#db-report .db-links button.check-links { margin-bottom: 0.2em ; padding: 0.25em 0.5em ; }
#db-report .db-links .check-links-frame .status-msg { font-size: 60% ; font-style: italic ; }
#db-report .db-links .link-errors { font-size: 80% ; list-style-image: url("/public/images/link-error-bullet.png") ; }
#db-report .db-links .link-errors .status { font-family: monospace ; font-style: italic ; }
#db-report .db-images .dupe-analysis .collapsible { margin-bottom: 0.5em ; }
#db-report .db-images .dupe-analysis .hash { font-family: monospace ; font-size: 80% ; font-style: italic ; }
#db-report .db-images .image-sizes th { text-align: left ; font-weight: normal ; font-style: italic ; }
#db-report .db-images .image-sizes img { height: 0.9em ; }
#db-report .db-images .react-tabs__tab-list { margin-bottom: 0 ; font-weight: bold ; }
#db-report .db-images .react-tabs__tab-panel { border: 1px solid #aaa ; border-top-width: 0 ; padding: 0.25em 0.5em ; }

@ -0,0 +1,387 @@
import React from "react" ;
import { Link } from "react-router-dom" ;
import { Tabs, TabList, TabPanel, Tab } from "react-tabs" ;
import "react-tabs/style/react-tabs.css" ;
import "./DbReport.css" ;
import { PreviewableImage } from "./PreviewableImage" ;
import { gAppRef } from "./App.js" ;
import { makeCollapsibleList, pluralString, isLink } from "./utils.js" ;
const axios = require( "axios" ) ;
// --------------------------------------------------------------------
export class DbReport extends React.Component
{
// render the component
render() {
return ( <div id="db-report">
<div className="section"> <DbRowCounts /> </div>
<div className="section"> <DbLinks /> </div>
<div className="section"> <DbImages /> </div>
</div>
) ;
}
}
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class DbRowCounts extends React.Component
{
constructor( props ) {
// initialize
super( props ) ;
this.state = {
dbRowCounts: null,
} ;
// get the database row counts
axios.get(
gAppRef.makeFlaskUrl( "/db-report/row-counts" )
).then( resp => {
this.setState( { dbRowCounts: resp.data } ) ;
} ).catch( err => {
gAppRef.showErrorResponse( "Can't get the database row counts", err ) ;
} ) ;
}
render() {
// initialize
const dbRowCounts = this.state.dbRowCounts ;
// render the table rows
function makeRowCountRow( tableName ) {
const tableName2 = tableName[0].toUpperCase() + tableName.substring(1) ;
let nRows ;
if ( dbRowCounts ) {
nRows = dbRowCounts[ tableName ] ;
const nImages = dbRowCounts[ tableName+"_image" ] ;
if ( nImages > 0 )
nRows = ( <span>
{nRows} <span className="images">({pluralString(nImages,"image")})</span>
</span>
) ;
}
return ( <tr key={tableName}>
<td style={{paddingRight:"0.5em",fontWeight:"bold"}}> {tableName2}s: </td>
<td> {nRows} </td>
</tr>
) ;
}
let tableRows = [ "publisher", "publication", "article", "author", "scenario" ].map(
(tableName) => makeRowCountRow( tableName )
) ;
// render the component
return ( <div className="db-row-counts">
<h2> Content { !dbRowCounts && <img src="/images/loading.gif" className="loading" alt="Loading..." /> } </h2>
<table><tbody>{tableRows}</tbody></table>
</div>
) ;
}
}
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class DbLinks extends React.Component
{
constructor( props ) {
// initialize
super( props ) ;
this.state = {
dbLinks: null,
linksToCheck: null, currLinkToCheck: null, isFirstLinkCheck: true,
checkLinksInProgress: false, checkLinksStatusMsg: null,
linkErrors: {},
} ;
// initialize
this._getLinksToCheck() ;
}
render() {
// initialize
const dbLinks = this.state.dbLinks ;
// render the table rows
let tableRows = [] ;
for ( let key of [ "publisher", "publication", "article" ] ) {
const nDbLinks = dbLinks && dbLinks[key] ? dbLinks[key].length : null ;
const key2 = key[0].toUpperCase() + key.substring(1) + "s" ;
tableRows.push( <tr key={key}>
<td style={{paddingRight:"0.5em",fontWeight:"bold"}}> {key2}: </td>
<td style={{width:"100%"}}> {nDbLinks} </td>
</tr>
) ;
if ( this.state.linkErrors[ key ] ) {
// NOTE: Showing all the errors at once (e.g. not as a collapsible list) will be unwieldy
// if there are a lot of them, but this shouldn't happen often, and if it does, the user
// is likely to stop the check, fix the problem, then try again.
let rows = [] ;
for ( let linkError of this.state.linkErrors[ key ] ) {
const url = gAppRef.makeAppUrl( "/" + linkError[0][0] + "/" + linkError[0][1] ) ;
const targetUrl = linkError[0][3] ;
const target = isLink( targetUrl )
? <a href={targetUrl}>{targetUrl}</a>
: targetUrl ;
let errorMsg = linkError[1] && linkError[1] + ": " ;
rows.push( <li key={linkError[0]}>
<Link to={url} dangerouslySetInnerHTML={{__html:linkError[0][2]}} />
<span className="status"> ({errorMsg}{target}) </span>
</li>
) ;
}
tableRows.push( <tr key={key+"-errors"}>
<td colSpan="2">
<ul className="link-errors"> {rows} </ul>
</td>
</tr>
) ;
}
}
// render the component
const nLinksToCheck = this.state.linksToCheck ? this.state.linksToCheck.length - this.state.currLinkToCheck : null ;
const imageUrl = this.state.checkLinksInProgress ? "/images/loading.gif" : "/images/check-db-links.png" ;
return ( <div className="db-links">
<h2> Links { !dbLinks && <img src="/images/loading.gif" className="loading" alt="Loading..." /> } </h2>
{ this.state.linksToCheck && this.state.linksToCheck.length > 0 && (
<div className="check-links-frame">
<button className="check-links" style={{display:"flex"}} onClick={() => this.checkDbLinks()} >
<img src={imageUrl} style={{height:"1em",marginTop:"0.15em",marginRight:"0.5em"}} alt="Check database links." />
{ this.state.checkLinksInProgress ? "Stop checking" : "Check links (" + nLinksToCheck + ")" }
</button>
<div className="status-msg"> {this.state.checkLinksStatusMsg} </div>
</div>
) }
<table className="db-links" style={{width:"100%"}}><tbody>{tableRows}</tbody></table>
</div>
) ;
}
checkDbLinks() {
// start/stop checking links
const inProgress = ! this.state.checkLinksInProgress ;
this.setState( { checkLinksInProgress: inProgress } ) ;
if ( inProgress )
this._checkNextLink() ;
}
_checkNextLink( force ) {
// check if this is the start of a new run
if ( this.state.currLinkToCheck === 0 && !force ) {
// yup - reset the UI
this.setState( { linkErrors: {} } ) ;
// NOTE: If the user is checking the links *again*, it could be because some links were flagged
// during the first run, they've fixed them up, and want to check everything again. In this case,
// we need to re-fetch the links from the database.
if ( ! this.state.isFirstLinkCheck ) {
this._getLinksToCheck(
() => { this._checkNextLink( true ) ; },
() => { this.setState( { checkLinksInProgress: false } ) ; }
) ;
return ;
}
}
// check if this is the end of a run
if ( this.state.currLinkToCheck >= this.state.linksToCheck.length ) {
// yup - reset the UI
this.setState( {
checkLinksStatusMsg: "Checked " + pluralString( this.state.linksToCheck.length, "link" ) + ".",
currLinkToCheck: 0, // nb: to allow the user to check again
checkLinksInProgress: false,
isFirstLinkCheck: false,
} ) ;
return ;
}
// get the next link to check
const linkToCheck = this.state.linksToCheck[ this.state.currLinkToCheck ] ;
this.setState( { currLinkToCheck: this.state.currLinkToCheck + 1 } ) ;
let continueCheckLinks = () => {
// update the UI
this.setState( { checkLinksStatusMsg:
"Checked " + this.state.currLinkToCheck + " of " + pluralString( this.state.linksToCheck.length, "link" ) + "..."
} ) ;
// check the next link
if ( this.state.checkLinksInProgress )
this._checkNextLink() ;
}
// check the next link
let url = linkToCheck[3] ;
if ( url.substr( 0, 14 ) === "http://{FLASK}" )
url = gAppRef.makeFlaskUrl( url.substr( 14 ) ) ;
// NOTE: Because of CORS, we have to proxy URL's that don't belong to us via the backend :-/
let req = isLink( url )
? axios.post( gAppRef.makeFlaskUrl( "/db-report/check-link", {url:url} ) )
: axios.head( gAppRef.makeExternalDocUrl( url ) ) ;
req.then( resp => {
// the link worked - continue checking links
continueCheckLinks() ;
} ).catch( err => {
// the link failed - record the error
let newLinkErrors = this.state.linkErrors ;
if ( newLinkErrors[ linkToCheck[0] ] === undefined )
newLinkErrors[ linkToCheck[0] ] = [] ;
const errorMsg = err.response ? "HTTP " + err.response.status : null ;
newLinkErrors[ linkToCheck[0] ].push( [ linkToCheck, errorMsg ] ) ;
this.setState( { linkErrors: newLinkErrors } ) ;
// continue checking links
continueCheckLinks() ;
} ) ;
}
_getLinksToCheck( onOK, onError ) {
// get the links in the database
axios.get(
gAppRef.makeFlaskUrl( "/db-report/links" )
).then( resp => {
const dbLinks = resp.data ;
// flatten the links to a list
let linksToCheck = [] ;
for ( let key of [ "publisher", "publication", "article" ] ) {
for ( let row of dbLinks[key] ) {
linksToCheck.push( [
key, row[0], row[1], row[2]
] ) ;
}
}
this.setState( {
dbLinks: resp.data,
linksToCheck: linksToCheck,
currLinkToCheck: 0,
} ) ;
if ( onOK )
onOK() ;
} ).catch( err => {
gAppRef.showErrorResponse( "Can't get the database links", err ) ;
if ( onError )
onError() ;
} ) ;
}
}
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class DbImages extends React.Component
{
constructor( props ) {
// initialize
super( props ) ;
this.state = {
dbImages: null,
} ;
// get the database images
axios.get(
gAppRef.makeFlaskUrl( "/db-report/images" )
).then( resp => {
this.setState( { dbImages: resp.data } ) ;
} ).catch( err => {
gAppRef.showErrorResponse( "Can't get the database images", err ) ;
} ) ;
}
render() {
// initialize
const dbImages = this.state.dbImages ;
// render any duplicate images
let dupeImages = [] ;
if ( dbImages ) {
for ( let hash in dbImages.duplicates ) {
let parents = [] ;
for ( let row of dbImages.duplicates[hash] ) {
const url = gAppRef.makeAppUrl( "/" + row[0] + "/" + row[1] ) ;
parents.push(
<Link key={row[0]+":"+row[1]} to={url} dangerouslySetInnerHTML={{__html:row[2]}} />
) ;
}
// NOTE: We just use the first row's image since, presumably, they will all be the same.
const row = dbImages.duplicates[hash][ 0 ] ;
const imageUrl = gAppRef.makeFlaskImageUrl( row[0], row[1] ) ;
const caption = ( <span>
Found a duplicate image <span className="hash">(md5:{hash})</span>
</span>
) ;
dupeImages.push( <div className="dupe-image" style={{display:"flex"}} key={hash} >
<PreviewableImage url={imageUrl} style={{width:"3em",marginTop:"0.1em",marginRight:"0.5em"}} />
{ makeCollapsibleList( caption, parents, 5, {flexGrow:1}, hash ) }
</div>
) ;
}
}
// render the image sizes
let tabList = [] ;
let tabPanels = [] ;
if ( dbImages ) {
function toKB( n ) { return ( n / 1024 ).toFixed( 1 ) ; }
for ( let key of [ "publisher", "publication", "article" ] ) {
const tableName2 = key[0].toUpperCase() + key.substring(1) ;
tabList.push(
<Tab key={key}> {tableName2+"s"} </Tab>
) ;
let rows = [] ;
for ( let row of dbImages[key] ) {
const url = gAppRef.makeAppUrl( "/" + key + "/" + row[1] ) ;
// NOTE: Loading every image will be expensive, but we assume we're talking to a local server.
// Otherwise, we could use a generic "preview" image, and expand it out to the real image
// when the user clicks on it.
const imageUrl = gAppRef.makeFlaskImageUrl( key, row[1] ) ;
rows.push( <tr key={row}>
<td> <PreviewableImage url={imageUrl} /> </td>
<td> {toKB(row[0])} </td>
<td> <Link to={url} dangerouslySetInnerHTML={{__html:row[2]}} /> </td>
</tr>
) ;
}
tabPanels.push( <TabPanel key={key}>
{ rows.length === 0 ? "No images found." :
<table className="image-sizes"><tbody>
<tr><th style={{width:"1.25em"}}/><th style={{paddingRight:"0.5em"}}> Size (KB) </th><th> {tableName2} </th></tr>
{rows}
</tbody></table>
}
</TabPanel>
) ;
}
}
const imageSizes = tabList.length > 0 && ( <Tabs>
<TabList> {tabList} </TabList>
{tabPanels}
</Tabs>
) ;
// render the component
return ( <div className="db-images">
<h2> Images { !dbImages && <img src="/images/loading.gif" className="loading" alt="Loading..." /> } </h2>
{ dupeImages.length > 0 &&
<div className="dupe-analysis"> {dupeImages} </div>
}
{imageSizes}
</div>
) ;
}
}

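// A minimal sketch (hypothetical URLs, plain axios) of the sequential checking
// pattern used by _checkNextLink() above: each response, success or failure,
// schedules the next request, so only one request is in flight at a time and the
// user can stop the run between requests.
const axios = require( "axios" ) ;
function checkLinks( urls, i ) {
    if ( i >= urls.length )
        return ;
    axios.head( urls[i] )
        .catch( err => console.log( urls[i] + " failed: " + err.toString() ) )
        .then( () => checkLinks( urls, i+1 ) ) ;
}
checkLinks( [ "https://example.com/a", "https://example.com/b" ], 0 ) ;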
@ -1,12 +1,14 @@
.modal-form .MuiDialog-paper { width: 80% ; max-width: 50em !important ; height: 80% ; }
.modal-form .MuiPaper-rounded { border-top-right-radius: 15px ; }
.modal-form .MuiDialogTitle-root img { height: 1.25em ; margin-right: 0.5em ; }
.modal-form .row { display: flex ; align-items: center ; margin-bottom: 0.25em ; }
.modal-form .row label { margin-top: 3px ; }
.modal-form .row input , .row textarea , .row .react-select { flex-grow: 1 ; }
.modal-form .row.image { display: block ; }
.modal-form .row.image img.image { margin-right: 1em ; max-height: 5em ; cursor: pointer ; }
.modal-form .row.image img.image { margin-right: 1em ; max-width: 8em ; max-height: 5em ; cursor: pointer ; }
.modal-form .row.image img.remove-image { height: 1em ; margin-left: 0.25em ; cursor: pointer ; }
.modal-form .image-container { position: relative ; display: inline-block ; float: left ; }

@ -0,0 +1,90 @@
import React from "react" ;
import ReactDOM from "react-dom" ;
import $ from "jquery" ;
// --------------------------------------------------------------------
export class PreviewableImage extends React.Component
{
// NOTE: While the "react-modal-image" component seems to work nicely, there's no obvious way
// to use it on arbitrary images in user-defined content.
// This class is instead a wrapper around the jQuery-based imageZoom plugin.
render() {
return ( <a href={this.props.url} className="preview" target="_blank" rel="noopener noreferrer">
<img src={this.props.url} className={this.props.className} style={this.props.style} alt={this.props.altText} />
</a> ) ;
}
static initPreviewableImages() {
// load the imageZoom script
$.getScript( {
url: "/jQuery/imageZoom/jquery.imageZoom.js",
cache: true,
} ) ;
// load the imageZoom CSS
let cssNode = document.createElement( "link" ) ;
cssNode.type = "text/css" ;
cssNode.rel = "stylesheet" ;
cssNode.href = "/jQuery/imageZoom/jquery.imageZoom.css" ;
let headNode = document.getElementsByTagName( "head" )[0] ;
headNode.appendChild( cssNode ) ;
}
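// NOTE: This needs to be called once, before any PreviewableImage tries to activate itself.
// The call site isn't shown in this diff, but presumably lives somewhere app-wide, e.g.:
//   componentDidMount() {
//       PreviewableImage.initPreviewableImages() ; // load the imageZoom script + CSS once
//   }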
static adjustHtmlForPreviewableImages( html ) {
// FUDGE! The imageZoom plugin requires images to be wrapped in an <a class="preview"> tag.
// I was hoping to let the user enable the preview functionality for images simply by adding
// a "preview" attribute to their <img> tags, then locating them after render and dynamically
// wrapping them in the necessary <a class="preview"> tag, but React doesn't seem to like that :-/
// Instead, we look for such images in the HTML returned to us by the backend server, and fix
// them up before rendering.
// initialize
if ( ! html )
return "" ;
// locate <img> tags with a class of "preview", and wrap them in a <a class="preview">.
let buf=[], pos=0 ;
const img_regex = /<img [^>]*class\s*=\s*["']preview["'][^>]*>/g ;
const url_regex = /src\s*=\s*["'](.*?)["']/ ;
for ( const match of html.matchAll( img_regex ) ) {
buf.push( html.substring( pos, match.index ) ) ;
const match2 = url_regex.exec( match[0] ) ;
if ( match2 ) {
buf.push(
"<a href='" + match2[1] + "' class='preview'>",
match[0],
"</a>"
) ;
} else
buf.push( match[0] ) ;
pos = match.index + match[0].length ;
}
buf.push( html.substring( pos ) ) ;
return buf.join( "" ) ;
}
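// For example, backend HTML like this:
//   <p> Some text <img src="/images/foo.png" class="preview"> ... </p>
// comes back rewritten as:
//   <p> Some text <a href='/images/foo.png' class='preview'><img src="/images/foo.png" class="preview"></a> ... </p>
// nb: <img> tags without class="preview" are passed through unchanged.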
componentDidMount() {
if ( this.props.manualActivate ) {
// NOTE: We normally want a PreviewableImage to activate itself automatically, but there is
// a common case where we don't want this to happen: when raw HTML received from the backend
// is inserted directly into the page.
// In this case, <img> tags are fixed up by adjustHtmlForPreviewableImages() as raw HTML (i.e. not
// as PreviewableImage instances), and so the page still needs to call activatePreviewableImages()
// to activate them. Since it's probably not a good idea to activate an image twice, any
// PreviewableImage instances on such a page should be created as "manually activated".
return ;
}
let $elem = $( ReactDOM.findDOMNode( this ) ) ;
$elem.imageZoom() ;
}
static activatePreviewableImages( rootNode ) {
// locate images marked as previewable and activate them
let $elems = $( ReactDOM.findDOMNode( rootNode ) ).find( "a.preview" ) ;
$elems.imageZoom() ;
}
}
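Putting the two activation paths together: a page that mixes PreviewableImage instances with raw backend HTML would presumably use the class along these lines (a minimal sketch, assuming the usual imports; the component and prop names other than PreviewableImage's own API are illustrative):

class SomePage extends React.Component {
    render() {
        // fix up any <img class="preview"> tags in the backend-supplied HTML
        const html = PreviewableImage.adjustHtmlForPreviewableImages( this.props.descriptionHtml ) ;
        return ( <div>
            {/* nb: manualActivate, since activatePreviewableImages() below handles this image */}
            <PreviewableImage url={this.props.imageUrl} manualActivate={true} />
            <div dangerouslySetInnerHTML={{ __html: html }} />
        </div> ) ;
    }
    componentDidMount() {
        // activate everything (the PreviewableImage and the fixed-up raw HTML) exactly once
        PreviewableImage.activatePreviewableImages( this ) ;
    }
}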

@ -8,7 +8,11 @@
#publication-form .articles { flex-direction: column ; align-items: initial ; }
#publication-form .articles { font-size: 90% ; padding: 0 0.5em 0.5em 0.5em ; border: 1px solid #c5c5c5 ; }
#publication-form .articles legend { margin-left: 1em ; padding: 0 5px ; font-size: 100% ; }
#publication-form .articles li.draggable { list-style-type: none ; margin: 0 0 2px -16px ; border: 1px solid #91cdf5 ; padding: 2px 5px ; background: #d3edfc ; cursor: pointer ; }
#publication-form .articles li.draggable {
margin: 0 0 2px -16px ; border: 1px solid #91cdf5 ; padding: 2px 5px ; background: #d3edfc ;
list-style-type: none ; list-style-image: none ;
cursor: pointer ;
}
#publication-form .articles .pageno { font-size: 80% ; font-style: italic ; color: #666 ; }
.dragLine { border-bottom: 2px solid #1080d0 !important ; }

@ -3,9 +3,10 @@ import { Link } from "react-router-dom" ;
import { Menu, MenuList, MenuButton, MenuItem } from "@reach/menu-button" ;
import "./PublicationSearchResult.css" ;
import { PublicationSearchResult2 } from "./PublicationSearchResult2.js" ;
import { PreviewableImage } from "./PreviewableImage.js" ;
import { PUBLICATION_EXCESS_ARTICLE_THRESHOLD } from "./constants.js" ;
import { gAppRef } from "./App.js" ;
import { makeCollapsibleList, pluralString, applyUpdatedVals, removeSpecialFields, isLink } from "./utils.js" ;
import { makeCollapsibleList, pluralString, updateRecord } from "./utils.js" ;
const axios = require( "axios" ) ;
@ -17,13 +18,15 @@ export class PublicationSearchResult extends React.Component
render() {
// prepare the basic details
const display_description = this.props.data[ "pub_description!" ] || this.props.data.pub_description ;
const publ = gAppRef.caches.publishers[ this.props.data.publ_id ] ;
const image_url = PublicationSearchResult.makeImageUrl( this.props.data ) ;
const display_description = PreviewableImage.adjustHtmlForPreviewableImages(
this.props.data[ "pub_description!" ] || this.props.data.pub_description
) ;
const parent_publ = this.props.data._parent_publ ;
const image_url = PublicationSearchResult._makeImageUrl( this.props.data ) ;
// prepare the publication's URL
let pub_url = this.props.data.pub_url ;
if ( pub_url && ! isLink(pub_url) )
if ( pub_url )
pub_url = gAppRef.makeExternalDocUrl( pub_url ) ;
// prepare the tags
@ -55,8 +58,15 @@ export class PublicationSearchResult extends React.Component
if ( this.props.data.articles ) {
for ( let i=0 ; i < this.props.data.articles.length ; ++i ) {
const article = this.props.data.articles[ i ] ;
let onArticleClick = (evt) => {
// NOTE: We let the parent take a look at clicks first, so that it can scroll
// to the article if it's already on-screen.
if ( this.props.onArticleClick && this.props.onArticleClick( article.article_id ) )
evt.preventDefault() ;
} ;
articles.push( <Link title="Show this article."
to = { gAppRef.makeAppUrl( "/article/" + article.article_id ) }
onClick = {onArticleClick}
dangerouslySetInnerHTML = {{ __html: article.article_title }}
/> ) ;
}
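// NOTE: The parent's onArticleClick() handler is expected to return true if it handled
// the click (i.e. the article was already on-screen and it scrolled to it), which suppresses
// the Link's normal navigation. A minimal sketch of such a handler (names illustrative):
//   onArticleClick( articleId ) {
//       const elem = document.getElementById( "article-" + articleId ) ;
//       if ( ! elem )
//           return false ; // nb: not on-screen - let the Link navigate normally
//       elem.scrollIntoView() ;
//       return true ;
//   }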
@ -66,12 +76,12 @@ export class PublicationSearchResult extends React.Component
const menu = ( <Menu>
<MenuButton className="sr-menu" />
<MenuList>
<MenuItem className="edit"
onSelect = { this.onEditPublication.bind( this ) }
>Edit</MenuItem>
<MenuItem className="delete"
onSelect = { this.onDeletePublication.bind( this ) }
>Delete</MenuItem>
<MenuItem className="edit" onSelect={ () => this.onEditPublication() } >
<img src="/images/edit.png" alt="Edit." /> Edit
</MenuItem>
<MenuItem className="delete" onSelect={ () => this.onDeletePublication() } >
<img src="/images/delete.png" alt="Delete." /> Delete
</MenuItem>
</MenuList>
</Menu> ) ;
@ -80,10 +90,10 @@ export class PublicationSearchResult extends React.Component
>
<div className="header">
{menu}
{ publ &&
{ parent_publ &&
<Link className="publisher" title="Show this publisher."
to = { gAppRef.makeAppUrl( "/publisher/" + this.props.data.publ_id ) }
dangerouslySetInnerHTML={{ __html: publ.publ_name }}
dangerouslySetInnerHTML={{ __html: parent_publ.publ_name }}
/>
}
<Link className="name" title="Show this publication."
@ -97,7 +107,7 @@ export class PublicationSearchResult extends React.Component
}
</div>
<div className="content">
{ image_url && <img src={image_url} className="image" alt="Publication." /> }
{ image_url && <PreviewableImage url={image_url} manualActivate={true} className="image" altText="Publication." /> }
<div className="description" dangerouslySetInnerHTML={{__html: display_description}} />
{ makeCollapsibleList( "Articles", articles, PUBLICATION_EXCESS_ARTICLE_THRESHOLD, {float:"left",marginBottom:"0.25em"} ) }
</div>
@ -108,58 +118,73 @@ export class PublicationSearchResult extends React.Component
</div> ) ;
}
static onNewPublication( notify ) {
PublicationSearchResult2._doEditPublication( {}, null, (newVals,refs) => {
axios.post( gAppRef.makeFlaskUrl( "/publication/create", {list:1} ), newVals )
.then( resp => {
// update the caches
gAppRef.caches.publications = resp.data.publications ;
gAppRef.caches.tags = resp.data.tags ;
// unload any updated values
applyUpdatedVals( newVals, newVals, resp.data.updated, refs ) ;
// update the UI with the new details
notify( resp.data.pub_id, newVals ) ;
if ( resp.data.warnings )
gAppRef.showWarnings( "The new publication was created OK.", resp.data.warnings ) ;
else
gAppRef.showInfoToast( <div> The new publication was created OK. </div> ) ;
gAppRef.closeModalForm() ;
// NOTE: The parent publisher will update itself in the UI to show this new publication,
// since we've just received an updated copy of the publications.
} )
.catch( err => {
gAppRef.showErrorMsg( <div> Couldn't create the publication: <div className="monospace"> {err.toString()} </div> </div> ) ;
componentDidMount() {
PreviewableImage.activatePreviewableImages( this ) ;
}
static onNewPublication() {
gAppRef.dataCache.get( [ "publishers", "publications", "tags" ], () => {
PublicationSearchResult2._doEditPublication( {}, null, (newVals,refs) => {
axios.post(
gAppRef.makeFlaskUrl( "/publication/create" ), newVals
).then( resp => {
gAppRef.dataCache.refresh( [ "publications", "tags" ], () => {
// update the UI
const newPub = resp.data.record ;
gAppRef.prependSearchResult( newPub ) ;
if ( newPub._parent_publ )
gAppRef.updatePublisher( newPub._parent_publ.publ_id ) ;
// let the user know what happened
if ( resp.data.warnings )
gAppRef.showWarnings( "The new publication was created OK.", resp.data.warnings ) ;
else
gAppRef.showInfoToast( <div> The new publication was created OK. </div> ) ;
gAppRef.closeModalForm() ;
} ) ;
} ).catch( err => {
gAppRef.showErrorMsg( <div> Couldn't create the publication: <div className="monospace"> {err.toString()} </div> </div> ) ;
} ) ;
} ) ;
} ) ;
}
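// NOTE: dataCache.get() and dataCache.refresh() aren't part of this diff. From their usage here,
// get() presumably loads the named caches if necessary, then invokes the callback, while
// refresh() always re-fetches them from the server first, e.g. (shape inferred):
//   gAppRef.dataCache.get( [ "tags" ], () => { ... } ) ;     // caches guaranteed loaded
//   gAppRef.dataCache.refresh( [ "tags" ], () => { ... } ) ; // caches re-fetched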
onEditPublication() {
// get the articles for this publication
let articles = this.props.data.articles ; // nb: _doEditPublication() might change the order of this list
PublicationSearchResult2._doEditPublication( this.props.data, articles, (newVals,refs) => {
// send the updated details to the server
newVals.pub_id = this.props.data.pub_id ;
if ( articles )
newVals.article_order = articles.map( a => a.article_id ) ;
axios.post( gAppRef.makeFlaskUrl( "/publication/update", {list:1} ), newVals )
.then( resp => {
// update the caches
gAppRef.caches.publications = resp.data.publications ;
gAppRef.caches.tags = resp.data.tags ;
// update the UI with the new details
applyUpdatedVals( this.props.data, newVals, resp.data.updated, refs ) ;
removeSpecialFields( this.props.data ) ;
this.forceUpdate() ;
if ( resp.data.warnings )
gAppRef.showWarnings( "The publication was updated OK.", resp.data.warnings ) ;
else
gAppRef.showInfoToast( <div> The publication was updated OK. </div> ) ;
gAppRef.closeModalForm() ;
// NOTE: The parent publisher will update itself in the UI to show this updated publication,
// since we've just received an updated copy of the publications.
} )
.catch( err => {
gAppRef.showErrorMsg( <div> Couldn't update the publication: <div className="monospace"> {err.toString()} </div> </div> ) ;
gAppRef.dataCache.get( [ "publishers", "publications", "tags" ], () => {
// get the articles for this publication
let articles = this.props.data.articles ; // nb: _doEditPublication() might change the order of this list
PublicationSearchResult2._doEditPublication( this.props.data, articles, (newVals,refs) => {
// send the updated details to the server
newVals.pub_id = this.props.data.pub_id ;
if ( articles )
newVals.article_order = articles.map( a => a.article_id ) ;
axios.post(
gAppRef.makeFlaskUrl( "/publication/update" ), newVals
).then( resp => {
// refresh the caches
gAppRef.dataCache.refresh( [ "publications", "tags" ], () => {
// update the UI
const pub = resp.data.record ;
const orig_parent_publ = this.props.data._parent_publ ;
updateRecord( this.props.data, pub ) ;
if ( pub._parent_publ )
gAppRef.updatePublisher( pub._parent_publ.publ_id ) ;
if ( orig_parent_publ )
gAppRef.updatePublisher( orig_parent_publ.publ_id ) ;
// force the image to reload if it was changed
if ( newVals.imageData )
gAppRef.forceFlaskImageReload( "publication", newVals.pub_id ) ;
this.forceUpdate() ;
PreviewableImage.activatePreviewableImages( this ) ;
// let the user know what happened
if ( resp.data.warnings )
gAppRef.showWarnings( "The publication was updated OK.", resp.data.warnings ) ;
else
gAppRef.showInfoToast( <div> The publication was updated OK. </div> ) ;
gAppRef.closeModalForm() ;
} ) ;
} ).catch( err => {
gAppRef.showErrorMsg( <div> Couldn't update the publication: <div className="monospace"> {err.toString()} </div> </div> ) ;
} ) ;
} ) ;
} ) ;
}
@ -187,22 +212,23 @@ export class PublicationSearchResult extends React.Component
gAppRef.ask( content, "ask", {
"OK": () => {
// delete the publication on the server
axios.get( gAppRef.makeFlaskUrl( "/publication/delete/" + this.props.data.pub_id, {list:1} ) )
.then( resp => {
// update the caches
gAppRef.caches.publications = resp.data.publications ;
gAppRef.caches.tags = resp.data.tags ;
axios.get(
gAppRef.makeFlaskUrl( "/publication/delete/" + this.props.data.pub_id )
).then( resp => {
gAppRef.dataCache.refresh( [ "publications", "tags" ] ) ;
// update the UI
this.props.onDelete( "pub_id", this.props.data.pub_id ) ;
resp.data.deleteArticles.forEach( article_id => {
this.props.onDelete( "article_id", article_id ) ;
resp.data.deletedArticles.forEach( article_id => {
this.props.onDelete( "article_id", article_id, true ) ;
} ) ;
if ( this.props.data._parent_publ )
gAppRef.updatePublisher( this.props.data._parent_publ.publ_id ) ;
// update the UI
if ( resp.data.warnings )
gAppRef.showWarnings( "The publication was deleted.", resp.data.warnings ) ;
else
gAppRef.showInfoToast( <div> The publication was deleted. </div> ) ;
} )
.catch( err => {
} ).catch( err => {
gAppRef.showErrorToast( <div> Couldn't delete the publication: <div className="monospace"> {err.toString()} </div> </div> ) ;
} ) ;
},
@ -210,11 +236,11 @@ export class PublicationSearchResult extends React.Component
} ) ;
}
// get the publication details
axios.get( gAppRef.makeFlaskUrl( "/publication/" + this.props.data.pub_id ) )
.then( resp => {
axios.get(
gAppRef.makeFlaskUrl( "/publication/" + this.props.data.pub_id )
).then( resp => {
doDelete( resp.data.nArticles ) ;
} )
.catch( err => {
} ).catch( err => {
doDelete( err ) ;
} ) ;
}
@ -232,15 +258,13 @@ export class PublicationSearchResult extends React.Component
}
_makeDisplayName( allowAlternateContent ) { return PublicationSearchResult.makeDisplayName( this.props.data, allowAlternateContent ) ; }
static makeImageUrl( vals ) {
let image_url = gAppRef.makeFlaskImageUrl( "publication", vals.pub_image_id, true ) ;
static _makeImageUrl( vals ) {
let image_url = gAppRef.makeFlaskImageUrl( "publication", vals.pub_image_id ) ;
if ( ! image_url ) {
// check if the parent publisher has an image
if ( vals.publ_id ) {
const publ = gAppRef.caches.publishers[ vals.publ_id ] ;
if ( publ )
image_url = gAppRef.makeFlaskImageUrl( "publisher", publ.publ_image_id ) ;
}
const parent_publ = vals._parent_publ ;
if ( parent_publ )
image_url = gAppRef.makeFlaskImageUrl( "publisher", parent_publ.publ_image_id ) ;
}
return image_url ;
}
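// e.g. a publication with no image of its own falls back to its parent publisher's image
// (assuming makeFlaskImageUrl() returns null for a missing image ID):
//   _makeImageUrl( { pub_image_id: null, _parent_publ: { publ_image_id: 17 } } )
//   => the URL for publisher image #17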
