remove related build artifacts

Jörn Guy Süß 2025-07-15 12:09:27 +10:00
parent 928a331a40
commit d662e8509a
4 changed files with 0 additions and 811 deletions


@@ -1,211 +0,0 @@
@echo off
setlocal enabledelayedexpansion
REM --- Configuration ---
set REPO_URL=https://github.com/hapifhir/hapi-fhir-jpaserver-starter.git
set CLONE_DIR=hapi-fhir-jpaserver
set SOURCE_CONFIG_DIR=hapi-fhir-setup
set CONFIG_FILE=application.yaml
REM --- Define Paths ---
set SOURCE_CONFIG_PATH=..\%SOURCE_CONFIG_DIR%\target\classes\%CONFIG_FILE%
set DEST_CONFIG_PATH=%CLONE_DIR%\target\classes\%CONFIG_FILE%
REM === Prompt for Installation Mode ===
:GetModeChoice
SET "APP_MODE=" REM Clear the variable first
echo Select Installation Mode:
echo 1. Standalone (Includes local HAPI FHIR Server - Requires Git & Maven)
echo 2. Lite (Excludes local HAPI FHIR Server - No Git/Maven needed)
CHOICE /C 12 /N /M "Enter your choice (1 or 2):"
IF ERRORLEVEL 2 (
SET APP_MODE=lite
goto :ModeSet
)
IF ERRORLEVEL 1 (
SET APP_MODE=standalone
goto :ModeSet
)
REM If somehow neither was chosen (e.g., Ctrl+C), loop back
echo Invalid input. Please try again.
goto :GetModeChoice
:ModeSet
IF "%APP_MODE%"=="" (
echo Invalid choice detected after checks. Exiting.
goto :end
)
echo Selected Mode: %APP_MODE%
echo.
REM === End mode selection ===
REM === Conditionally Execute HAPI Setup ===
IF "%APP_MODE%"=="standalone" (
echo Running Standalone setup including HAPI FHIR...
echo.
REM --- Step 0: Clean up previous clone (optional) ---
echo Checking for existing directory: %CLONE_DIR%
if exist "%CLONE_DIR%" (
echo Found existing directory, removing it...
rmdir /s /q "%CLONE_DIR%"
if errorlevel 1 (
echo ERROR: Failed to remove existing directory: %CLONE_DIR%
goto :error
)
echo Existing directory removed.
) else (
echo Directory does not exist, proceeding with clone.
)
echo.
REM --- Step 1: Clone the HAPI FHIR server repository ---
echo Cloning repository: %REPO_URL% into %CLONE_DIR%...
git clone "%REPO_URL%" "%CLONE_DIR%"
if errorlevel 1 (
echo ERROR: Failed to clone repository. Check Git installation and network connection.
goto :error
)
echo Repository cloned successfully.
echo.
REM --- Step 2: Navigate into the cloned directory ---
echo Changing directory to %CLONE_DIR%...
cd "%CLONE_DIR%"
if errorlevel 1 (
echo ERROR: Failed to change directory to %CLONE_DIR%.
goto :error
)
echo Current directory: !CD!
echo.
REM --- Step 3: Build the HAPI server using Maven ---
echo ===^> Starting Maven build ^(Step 3^)...
cmd /c "mvn clean package -DskipTests=true -Pboot"
echo ===^> Maven command finished. Checking error level...
if errorlevel 1 (
echo ERROR: Maven build failed or cmd /c failed
cd ..
goto :error
)
echo Maven build completed successfully. ErrorLevel: !errorlevel!
echo.
REM --- Step 4: Copy the configuration file ---
echo ===> "Starting file copy (Step 4)..."
echo Copying configuration file...
echo Source: %SOURCE_CONFIG_PATH%
echo Destination: target\classes\%CONFIG_FILE%
xcopy "%SOURCE_CONFIG_PATH%" "target\classes\" /Y /I
echo ===> xcopy command finished. Checking error level...
if errorlevel 1 (
echo WARNING: Failed to copy configuration file. Check if the source file exists.
echo The script will continue, but the server might use default configuration.
) else (
echo Configuration file copied successfully. ErrorLevel: %errorlevel%
)
echo.
REM --- Step 5: Navigate back to the parent directory ---
echo ===> "Changing directory back (Step 5)..."
cd ..
if errorlevel 1 (
echo ERROR: Failed to change back to the parent directory. ErrorLevel: %errorlevel%
goto :error
)
echo Current directory: %CD%
echo.
) ELSE (
echo Running Lite setup, skipping HAPI FHIR build...
REM Ensure the hapi-fhir-jpaserver directory doesn't exist or is empty if Lite mode is chosen after a standalone attempt
if exist "%CLONE_DIR%" (
echo Found existing HAPI directory in Lite mode. Removing it to avoid build issues...
rmdir /s /q "%CLONE_DIR%"
)
REM Create empty target directories expected by Dockerfile COPY, even if not used
mkdir "%CLONE_DIR%\target\classes" 2> nul
mkdir "%CLONE_DIR%\custom" 2> nul
REM Create a placeholder empty WAR file to satisfy Dockerfile COPY
echo. > "%CLONE_DIR%\target\ROOT.war"
echo. > "%CLONE_DIR%\target\classes\application.yaml"
echo Placeholder files created for Lite mode build.
echo.
)
REM === Modify docker-compose.yml to set APP_MODE ===
echo Updating docker-compose.yml with APP_MODE=%APP_MODE%...
(
echo version: '3.8'
echo services:
echo fhirflare:
echo build:
echo context: .
echo dockerfile: Dockerfile
echo ports:
echo - "5000:5000"
echo - "8080:8080" # Keep port exposed, even if Tomcat isn't running useful stuff in Lite
echo volumes:
echo - ./instance:/app/instance
echo - ./static/uploads:/app/static/uploads
echo - ./instance/hapi-h2-data/:/app/h2-data # Keep volume mounts consistent
echo - ./logs:/app/logs
echo environment:
echo - FLASK_APP=app.py
echo - FLASK_ENV=development
echo - NODE_PATH=/usr/lib/node_modules
echo - APP_MODE=%APP_MODE%
echo - APP_BASE_URL=http://localhost:5000
echo - HAPI_FHIR_URL=http://localhost:8080/fhir
echo command: supervisord -c /etc/supervisord.conf
) > docker-compose.yml.tmp
REM Check if docker-compose.yml.tmp was created successfully
if not exist docker-compose.yml.tmp (
echo ERROR: Failed to create temporary docker-compose file.
goto :error
)
REM Replace the original docker-compose.yml
del docker-compose.yml /Q > nul 2>&1
ren docker-compose.yml.tmp docker-compose.yml
echo docker-compose.yml updated successfully.
echo.
REM --- Step 6: Build Docker images ---
echo ===^> Starting Docker build (Step 6)...
docker-compose build --no-cache
if errorlevel 1 (
echo ERROR: Docker Compose build failed. Check Docker installation and docker-compose.yml file. ErrorLevel: %errorlevel%
goto :error
)
echo Docker images built successfully. ErrorLevel: %errorlevel%
echo.
REM --- Step 7: Start Docker containers ---
echo ===^> Starting Docker containers (Step 7)...
docker-compose up -d
if errorlevel 1 (
echo ERROR: Docker Compose up failed. Check Docker installation and container configurations. ErrorLevel: %errorlevel%
goto :error
)
echo Docker containers started successfully. ErrorLevel: %errorlevel%
echo.
echo ====================================
echo Script finished successfully! (Mode: %APP_MODE%)
echo ====================================
goto :end
:error
echo ------------------------------------
echo An error occurred. Script aborted.
echo ------------------------------------
pause
exit /b 1
:end
echo Script execution finished.
pause


@@ -1,147 +0,0 @@
#!/bin/bash
set -e
# --- Configuration ---
REPO_URL="https://github.com/hapifhir/hapi-fhir-jpaserver-starter.git"
CLONE_DIR="hapi-fhir-jpaserver"
SOURCE_CONFIG_DIR="hapi-fhir-setup"
CONFIG_FILE="application.yaml"
# --- Define Paths ---
SOURCE_CONFIG_PATH="../$SOURCE_CONFIG_DIR/target/classes/$CONFIG_FILE"
DEST_CONFIG_PATH="$CLONE_DIR/target/classes/$CONFIG_FILE"
# === Prompt for Version ===
while true; do
echo "Select Installation Mode:"
echo "1. Standalone (Includes local HAPI FHIR Server - Requires Git & Maven)"
echo "2. Lite (Excludes local HAPI FHIR Server - No Git/Maven needed)"
read -p "Enter your choice (1 or 2): " choice
case $choice in
1)
APP_MODE="standalone"
break
;;
2)
APP_MODE="lite"
break
;;
*)
echo "Invalid input. Please try again."
;;
esac
done
echo "Selected Mode: $APP_MODE"
echo
# === Conditionally Execute HAPI Setup ===
if [ "$APP_MODE" = "standalone" ]; then
echo "Running Standalone setup including HAPI FHIR..."
echo
# Step 0: Clean up previous clone (optional)
if [ -d "$CLONE_DIR" ]; then
echo "Found existing directory, removing it..."
rm -rf "$CLONE_DIR"
echo "Existing directory removed."
else
echo "Directory does not exist, proceeding with clone."
fi
echo
# Step 1: Clone the HAPI FHIR server repository
echo "Cloning repository: $REPO_URL into $CLONE_DIR..."
git clone "$REPO_URL" "$CLONE_DIR"
echo "Repository cloned successfully."
echo
# Step 2: Navigate into the cloned directory
cd "$CLONE_DIR"
echo "Current directory: $(pwd)"
echo
# Step 3: Build the HAPI server using Maven
echo "===> Starting Maven build (Step 3)..."
mvn clean package -DskipTests=true -Pboot
echo "Maven build completed successfully."
echo
# Step 4: Copy the configuration file
echo "===> Starting file copy (Step 4)..."
echo "Copying configuration file..."
echo "Source: $SOURCE_CONFIG_PATH"
echo "Destination: target/classes/$CONFIG_FILE"
cp "$SOURCE_CONFIG_PATH" "target/classes/" || echo "WARNING: Failed to copy configuration file. The server might use default configuration."
echo "Configuration file copy step finished."
echo
# Step 5: Navigate back to the parent directory
cd ..
echo "Current directory: $(pwd)"
echo
else
echo "Running Lite setup, skipping HAPI FHIR build..."
# Ensure the hapi-fhir-jpaserver directory doesn't exist or is empty if Lite mode is chosen after a standalone attempt
if [ -d "$CLONE_DIR" ]; then
echo "Found existing HAPI directory in Lite mode. Removing it to avoid build issues..."
rm -rf "$CLONE_DIR"
fi
# Create empty target directories expected by Dockerfile COPY, even if not used
mkdir -p "$CLONE_DIR/target/classes"
mkdir -p "$CLONE_DIR/custom"
# Create a placeholder empty WAR file to satisfy Dockerfile COPY
touch "$CLONE_DIR/target/ROOT.war"
touch "$CLONE_DIR/target/classes/application.yaml"
echo "Placeholder files created for Lite mode build."
echo
fi
# === Modify docker-compose.yml to set APP_MODE ===
echo "Updating docker-compose.yml with APP_MODE=$APP_MODE..."
cat <<EOF > docker-compose.yml.tmp
version: '3.8'
services:
fhirflare:
build:
context: .
dockerfile: Dockerfile
ports:
- "5000:5000"
- "8080:8080"
volumes:
- ./instance:/app/instance
- ./static/uploads:/app/static/uploads
- ./instance/hapi-h2-data/:/app/h2-data
- ./logs:/app/logs
environment:
- FLASK_APP=app.py
- FLASK_ENV=development
- NODE_PATH=/usr/lib/node_modules
- APP_MODE=$APP_MODE
- APP_BASE_URL=http://localhost:5000
- HAPI_FHIR_URL=http://localhost:8080/fhir
command: supervisord -c /etc/supervisord.conf
EOF
# Replace the original docker-compose.yml
mv docker-compose.yml.tmp docker-compose.yml
echo "docker-compose.yml updated successfully."
echo
# --- Step 6: Build Docker images ---
echo "===> Starting Docker build (Step 6)..."
docker compose build --no-cache
echo "Docker images built successfully."
echo
# --- Step 7: Start Docker containers ---
echo "===> Starting Docker containers (Step 7)..."
docker compose up -d
echo "Docker containers started successfully."
echo
echo "===================================="
echo "Script finished successfully! (Mode: $APP_MODE)"
echo "===================================="


@@ -1,111 +0,0 @@
# Application Build and Run Guide - MANUAL STEPS
This guide outlines the manual steps to set up, build, and run the application: the local HAPI FHIR server component plus the remaining services managed via Docker Compose.
## Prerequisites
Before you begin, ensure you have the following installed on your system:
* [Git](https://git-scm.com/)
* [Maven](https://maven.apache.org/)
* [Java Development Kit (JDK)](https://www.oracle.com/java/technologies/downloads/) (Ensure compatibility with the HAPI FHIR version)
* [Docker](https://www.docker.com/products/docker-desktop/)
* [Docker Compose](https://docs.docker.com/compose/install/) (Often included with Docker Desktop)
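A quick way to confirm these tools are installed and on your PATH (output will vary by platform and version):
# Verify the prerequisite tooling
git --version
mvn -version
java -version
docker --version
docker-compose --version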
## Setup and Build
Follow these steps to clone the necessary repository and build the components.
### 1. Clone and Build the HAPI FHIR Server
First, clone the HAPI FHIR JPA Server Starter project and build the server application.
# Step 1: Clone the repository
git clone https://github.com/hapifhir/hapi-fhir-jpaserver-starter.git hapi-fhir-jpaserver
# Navigate into the cloned directory
cd hapi-fhir-jpaserver
# Copy the file hapi-fhir-setup/target/classes/application.yaml to hapi-fhir-jpaserver/target/classes/application.yaml once the Maven build below has created the target/classes directory
# Step 2: Build the HAPI server package (skipping tests, using 'boot' profile)
# This creates the runnable WAR file in the 'target/' directory
mvn clean package -DskipTests=true -Pboot
# Return to the parent directory (or your project root)
cd ..
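If the build succeeded, the runnable WAR referenced later in this guide should now exist; a quick check:
# Confirm the build artifact was produced
ls hapi-fhir-jpaserver/target/ROOT.war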
### 2. Build the Rest of the Application (Docker)
Next, build the Docker images for the remaining parts of the application as defined in your docker-compose.yml file. Run this command from the root directory where your docker-compose.yml file is located.
# Step 3: Build Docker images without using cache
docker-compose build --no-cache
## Running the Application
### Option A: Running the Full Application (Recommended)
Use Docker Compose to start all services, including the HAPI FHIR server if it is configured in your docker-compose.yml. Run this from the root directory containing your docker-compose.yml.
# Step 4: Start all services defined in docker-compose.yml in detached mode
docker-compose up -d
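To confirm the services came up, you can check the container status and probe the ports published in docker-compose.yml (5000 for the Flask app, 8080 for the HAPI FHIR server; /fhir/metadata is the standard FHIR CapabilityStatement endpoint and should only respond in Standalone mode):
# Check container status
docker-compose ps
# Probe the published endpoints
curl http://localhost:5000
curl http://localhost:8080/fhir/metadata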
### Option B: Running the HAPI FHIR Server Standalone (Debugging Only)
This method runs only the HAPI FHIR server directly using the built WAR file. Use this primarily for debugging the server in isolation.
# Navigate into the HAPI server directory where you built it
cd hapi-fhir-jpaserver
# Run the WAR file directly using Java
java -jar target/ROOT.war
# Note: You might need to configure ports or database connections
# separately when running this way, depending on the application's needs.
# Remember to navigate back when done
# cd ..
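If you need different settings when running the WAR directly, standard Spring Boot command-line overrides can be passed (a sketch; the property names are Spring Boot defaults and assume the bootable WAR produced by the -Pboot profile):
# Example: change the port and point at an explicit configuration file
java -jar target/ROOT.war --server.port=8081 --spring.config.location=file:./target/classes/application.yaml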
## Useful Docker Commands
Here are some helpful commands for interacting with your running Docker containers:
### Copying files from a container
To copy a file from a running container to your local machine's current directory:
# Syntax: docker cp <CONTAINER_ID_OR_NAME>:<PATH_IN_CONTAINER> <LOCAL_DESTINATION_PATH>
docker cp <CONTAINER_ID>:/app/PATH/Filename.ext .
(Replace <CONTAINER_ID> and /app/PATH/Filename.ext with actual values; the final "." refers to the current directory on your host machine.)
### Accessing a container's shell
To get an interactive bash shell inside a running container:
# Syntax: docker exec -it <CONTAINER_ID_OR_NAME> bash
docker exec -it <CONTAINER_ID> bash
(Replace <CONTAINER_ID> with the actual container ID or name. You can find this using docker ps.)
### Viewing running containers
docker ps
### Viewing application logs
# Follow logs for all services
docker-compose logs -f
# Follow logs for a specific service
docker-compose logs -f <SERVICE_NAME>
(Replace <SERVICE_NAME> with the name defined in your docker-compose.yml)
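For the docker-compose.yml generated by the install scripts in this repository, the service is named fhirflare, so for example:
docker-compose logs -f fhirflare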
### Stopping the application
To stop the services started with docker-compose up -d:
docker-compose down


@@ -1,342 +0,0 @@
#Uncomment the "servlet" and "context-path" lines below to make the fhir endpoint available at /example/path/fhir instead of the default value of /fhir
server:
# servlet:
# context-path: /example/path
port: 8080
#Adds the option to go to eg. http://localhost:8080/actuator/health for seeing the running configuration
#see https://docs.spring.io/spring-boot/docs/current/reference/html/actuator.html#actuator.endpoints
management:
#The following configuration will enable the actuator endpoints at /actuator/health, /actuator/info, /actuator/prometheus, /actuator/metrics. For security purposes, only /actuator/health is enabled by default.
endpoints:
enabled-by-default: false
web:
exposure:
include: 'health' # or e.g. 'info,health,prometheus,metrics' or '*' for all
endpoint:
info:
enabled: true
metrics:
enabled: true
health:
enabled: true
probes:
enabled: true
group:
liveness:
include:
- livenessState
- readinessState
prometheus:
enabled: true
prometheus:
metrics:
export:
enabled: true
spring:
main:
allow-circular-references: true
flyway:
enabled: false
baselineOnMigrate: true
fail-on-missing-locations: false
datasource:
#url: 'jdbc:h2:file:./target/database/h2'
url: jdbc:h2:file:/app/h2-data/fhir;DB_CLOSE_DELAY=-1;AUTO_SERVER=TRUE
#url: jdbc:h2:mem:test_mem
username: sa
password: null
driverClassName: org.h2.Driver
max-active: 15
# database connection pool size
hikari:
maximum-pool-size: 10
jpa:
properties:
hibernate.format_sql: false
hibernate.show_sql: false
#Hibernate dialect is automatically detected except Postgres and H2.
#If using H2, then supply the value of ca.uhn.fhir.jpa.model.dialect.HapiFhirH2Dialect
#If using postgres, then supply the value of ca.uhn.fhir.jpa.model.dialect.HapiFhirPostgresDialect
hibernate.dialect: ca.uhn.fhir.jpa.model.dialect.HapiFhirH2Dialect
# hibernate.hbm2ddl.auto: update
# hibernate.jdbc.batch_size: 20
# hibernate.cache.use_query_cache: false
# hibernate.cache.use_second_level_cache: false
# hibernate.cache.use_structured_entries: false
# hibernate.cache.use_minimal_puts: false
### These settings will enable fulltext search with lucene or elastic
hibernate.search.enabled: false
### lucene parameters
# hibernate.search.backend.type: lucene
# hibernate.search.backend.analysis.configurer: ca.uhn.fhir.jpa.search.HapiHSearchAnalysisConfigurers$HapiLuceneAnalysisConfigurer
# hibernate.search.backend.directory.type: local-filesystem
# hibernate.search.backend.directory.root: target/lucenefiles
# hibernate.search.backend.lucene_version: lucene_current
### elastic parameters ===> see also elasticsearch section below <===
# hibernate.search.backend.type: elasticsearch
# hibernate.search.backend.analysis.configurer: ca.uhn.fhir.jpa.search.HapiHSearchAnalysisConfigurers$HapiElasticAnalysisConfigurer
hapi:
fhir:
### When set to true, this flag enables the evaluate measure operations from the CR Module.
### Flag is false by default, can be passed as command line argument to override.
cr:
enabled: false
caregaps:
reporter: "default"
section_author: "default"
cql:
use_embedded_libraries: true
compiler:
### These are low-level compiler options.
### They are not typically needed by most users.
# validate_units: true
# verify_only: false
# compatibility_level: "1.5"
error_level: Info
signature_level: All
# analyze_data_requirements: false
# collapse_data_requirements: false
# translator_format: JSON
# enable_date_range_optimization: true
enable_annotations: true
enable_locators: true
enable_results_type: true
enable_detailed_errors: true
# disable_list_traversal: false
# disable_list_demotion: false
# enable_interval_demotion: false
# enable_interval_promotion: false
# disable_method_invocation: false
# require_from_keyword: false
# disable_default_model_info_load: false
runtime:
debug_logging_enabled: false
# enable_validation: false
# enable_expression_caching: true
terminology:
valueset_preexpansion_mode: REQUIRE # USE_IF_PRESENT, REQUIRE, IGNORE
valueset_expansion_mode: PERFORM_NAIVE_EXPANSION # AUTO, USE_EXPANSION_OPERATION, PERFORM_NAIVE_EXPANSION
valueset_membership_mode: USE_EXPANSION # AUTO, USE_VALIDATE_CODE_OPERATION, USE_EXPANSION
code_lookup_mode: USE_VALIDATE_CODE_OPERATION # AUTO, USE_VALIDATE_CODE_OPERATION, USE_CODESYSTEM_URL
data:
search_parameter_mode: USE_SEARCH_PARAMETERS # AUTO, USE_SEARCH_PARAMETERS, FILTER_IN_MEMORY
terminology_parameter_mode: FILTER_IN_MEMORY # AUTO, USE_VALUE_SET_URL, USE_INLINE_CODES, FILTER_IN_MEMORY
profile_mode: DECLARED # ENFORCED, DECLARED, OPTIONAL, TRUST, OFF
cdshooks:
enabled: false
clientIdHeaderName: client_id
### This enables the swagger-ui at /fhir/swagger-ui/index.html as well as the /fhir/api-docs (see https://hapifhir.io/hapi-fhir/docs/server_plain/openapi.html)
openapi_enabled: true
### This is the FHIR version. Choose between DSTU2, DSTU3, R4 or R5
fhir_version: R4
### Flag is false by default. This flag enables runtime installation of IGs.
ig_runtime_upload_enabled: false
### enable to use the ApacheProxyAddressStrategy which uses X-Forwarded-* headers
### to determine the FHIR server address
# use_apache_address_strategy: false
### forces the use of the https:// protocol for the returned server address.
### alternatively, it may be set using the X-Forwarded-Proto header.
# use_apache_address_strategy_https: false
### enables the server to overwrite defaults on HTML, css, etc. under the url pattern of eg. /content/custom **
### Folder with custom content MUST be named custom. If omitted then default content applies
custom_content_path: ./custom
### enables the server to host custom content. If e.g. the value ./configs/app is supplied then the content
### will be served under /web/app
#app_content_path: ./configs/app
### enable to set the Server URL
# server_address: http://hapi.fhir.org/baseR4
# defer_indexing_for_codesystems_of_size: 101
### Flag is true by default. This flag filters resources during package installation, allowing only those resources with a valid status (e.g. active) to be installed.
# validate_resource_status_for_package_upload: false
# install_transitive_ig_dependencies: true
#implementationguides:
### example from registry (packages.fhir.org)
# swiss:
# name: swiss.mednet.fhir
# version: 0.8.0
# reloadExisting: false
# installMode: STORE_AND_INSTALL
# example not from registry
# ips_1_0_0:
# packageUrl: https://build.fhir.org/ig/HL7/fhir-ips/package.tgz
# name: hl7.fhir.uv.ips
# version: 1.0.0
# supported_resource_types:
# - Patient
# - Observation
##################################################
# Allowed Bundle Types for persistence (defaults are: COLLECTION,DOCUMENT,MESSAGE)
##################################################
# allowed_bundle_types: COLLECTION,DOCUMENT,MESSAGE,TRANSACTION,TRANSACTIONRESPONSE,BATCH,BATCHRESPONSE,HISTORY,SEARCHSET
# allow_cascading_deletes: true
# allow_contains_searches: true
# allow_external_references: true
# allow_multiple_delete: true
# allow_override_default_search_params: true
# auto_create_placeholder_reference_targets: false
# mass_ingestion_mode_enabled: false
### tells the server to automatically append the current version of the target resource to references at these paths
# auto_version_reference_at_paths: Device.patient, Device.location, Device.parent, DeviceMetric.parent, DeviceMetric.source, Observation.device, Observation.subject
# ips_enabled: false
# default_encoding: JSON
# default_pretty_print: true
# default_page_size: 20
# delete_expunge_enabled: true
# enable_repository_validating_interceptor: true
# enable_index_missing_fields: false
# enable_index_of_type: true
# enable_index_contained_resource: false
# upliftedRefchains_enabled: true
# resource_dbhistory_enabled: false
### !!Extended Lucene/Elasticsearch Indexing is still an experimental feature, expect some features (e.g. _total=accurate) to not work as expected!!
### more information here: https://hapifhir.io/hapi-fhir/docs/server_jpa/elastic.html
advanced_lucene_indexing: false
bulk_export_enabled: false
bulk_import_enabled: false
# language_search_parameter_enabled: true
# This is an experimental feature, and does not fully support _total and other FHIR features.
# enforce_referential_integrity_on_delete: false
# enforce_referential_integrity_on_write: false
# etag_support_enabled: true
# expunge_enabled: true
# client_id_strategy: ALPHANUMERIC
# server_id_strategy: SEQUENTIAL_NUMERIC
# fhirpath_interceptor_enabled: false
# filter_search_enabled: true
# graphql_enabled: true
narrative_enabled: true
mdm_enabled: false
mdm_rules_json_location: "mdm-rules.json"
## see: https://hapifhir.io/hapi-fhir/docs/interceptors/built_in_server_interceptors.html#jpa-server-retry-on-version-conflicts
# userRequestRetryVersionConflictsInterceptorEnabled : false
# local_base_urls:
# - https://hapi.fhir.org/baseR4
# pre_expand_value_sets: true
# enable_task_pre_expand_value_sets: true
# pre_expand_value_sets_default_count: 1000
# pre_expand_value_sets_max_count: 1000
# maximum_expansion_size: 1000
logical_urls:
- http://terminology.hl7.org/*
- https://terminology.hl7.org/*
- http://snomed.info/*
- https://snomed.info/*
- http://unitsofmeasure.org/*
- https://unitsofmeasure.org/*
- http://loinc.org/*
- https://loinc.org/*
# partitioning:
# allow_references_across_partitions: false
# partitioning_include_in_search_hashes: false
# conditional_create_duplicate_identifiers_enabled: false
cors:
allow_Credentials: true
# These are allowed_origin patterns, see: https://docs.spring.io/spring-framework/docs/current/javadoc-api/org/springframework/web/cors/CorsConfiguration.html#setAllowedOriginPatterns-java.util.List-
allowed_origin:
- '*'
# Search coordinator thread pool sizes
search-coord-core-pool-size: 20
search-coord-max-pool-size: 100
search-coord-queue-capacity: 200
# Search Prefetch Thresholds.
# This setting sets the number of search results to prefetch. For example, if this list
# is set to [100, 1000, -1] then the server will initially load 100 results and not
# attempt to load more. If the user requests subsequent page(s) of results and goes
# past 100 results, the system will load the next 900 (up to the following threshold of 1000).
# The system will progressively work through these thresholds.
# A threshold of -1 means to load all results. Note that if the final threshold is a
# number other than -1, the system will never prefetch more than the given number.
search_prefetch_thresholds: 13,503,2003,-1
# comma-separated package names, will be @ComponentScan'ed by Spring to allow for creating custom Spring beans
#custom-bean-packages:
# comma-separated list of fully qualified interceptor classes.
# classes listed here will be fetched from the Spring context when combined with 'custom-bean-packages',
# or will be instantiated via reflection using a no-arg constructor; then registered with the server
#custom-interceptor-classes:
# comma-separated list of fully qualified provider classes.
# classes listed here will be fetched from the Spring context when combined with 'custom-bean-packages',
# or will be instantiated via reflection using a no-arg constructor; then registered with the server
#custom-provider-classes:
# Threadpool size for BATCH'ed GETs in a bundle.
# bundle_batch_pool_size: 10
# bundle_batch_pool_max_size: 50
# logger:
# error_format: 'ERROR - ${requestVerb} ${requestUrl}'
# format: >-
# Path[${servletPath}] Source[${requestHeader.x-forwarded-for}]
# Operation[${operationType} ${operationName} ${idOrResourceName}]
# UA[${requestHeader.user-agent}] Params[${requestParameters}]
# ResponseEncoding[${responseEncodingNoDefault}]
# log_exceptions: true
# name: fhirtest.access
# max_binary_size: 104857600
# max_page_size: 200
# retain_cached_searches_mins: 60
# reuse_cached_search_results_millis: 60000
tester:
home:
name: FHIRFLARE Tester
server_address: http://localhost:8080/fhir
refuse_to_fetch_third_party_urls: false
fhir_version: R4
global:
name: Global Tester
server_address: "http://hapi.fhir.org/baseR4"
refuse_to_fetch_third_party_urls: false
fhir_version: R4
# validation:
# requests_enabled: true
# responses_enabled: true
# binary_storage_enabled: true
inline_resource_storage_below_size: 4000
# bulk_export_enabled: true
# subscription:
# resthook_enabled: true
# websocket_enabled: false
# polling_interval_ms: 5000
# immediately_queued: false
# email:
# from: some@test.com
# host: google.com
# port:
# username:
# password:
# auth:
# startTlsEnable:
# startTlsRequired:
# quitWait:
# lastn_enabled: true
# store_resource_in_lucene_index_enabled: true
### This is the configuration for the normalized quantity search level; the default is 0
### 0: NORMALIZED_QUANTITY_SEARCH_NOT_SUPPORTED - default
### 1: NORMALIZED_QUANTITY_STORAGE_SUPPORTED
### 2: NORMALIZED_QUANTITY_SEARCH_SUPPORTED
# normalized_quantity_search_level: 2
#elasticsearch:
# debug:
# pretty_print_json_log: false
# refresh_after_write: false
# enabled: false
# password: SomePassword
# required_index_status: YELLOW
# rest_url: 'localhost:9200'
# protocol: 'http'
# schema_management_strategy: CREATE
# username: SomeUsername