Joshua Hare 2025-08-04 15:15:46 +10:00
parent 7eaa94a705
commit 3744123361
2 changed files with 453 additions and 0 deletions

hapi-fhir-Setup/README.md (new file, 111 lines)

@@ -0,0 +1,111 @@
# Application Build and Run Guide - MANUAL STEPS
This guide outlines the steps to set up, build, and run the application: the HAPI FHIR server component plus the remaining services managed via Docker Compose.
## Prerequisites
Before you begin, ensure you have the following installed on your system (a quick version check is shown after the list):
* [Git](https://git-scm.com/)
* [Maven](https://maven.apache.org/)
* [Java Development Kit (JDK)](https://www.oracle.com/java/technologies/downloads/) (Ensure compatibility with the HAPI FHIR version)
* [Docker](https://www.docker.com/products/docker-desktop/)
* [Docker Compose](https://docs.docker.com/compose/install/) (Often included with Docker Desktop)
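A quick way to confirm these are installed and on your PATH (either the Compose v2 `docker compose` plugin or the legacy `docker-compose` binary is fine):

```bash
git --version
mvn -version
java -version
docker --version
docker compose version || docker-compose --version
```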
## Setup and Build
Follow these steps to clone the necessary repository and build the components.
### 1. Clone and Build the HAPI FHIR Server
First, clone the HAPI FHIR JPA Server Starter project and build the server application.
# Step 1: Clone the repository
git clone https://github.com/hapifhir/hapi-fhir-jpaserver-starter.git hapi-fhir-jpaserver
# Navigate into the cloned directory
cd hapi-fhir-jpaserver
Copy the file hapi-fhir-setup/target/classes/application.yaml to hapi-fhir-jpaserver/target/classes/application.yaml (inside the directory cloned above), replacing the default configuration.
# Step 2: Build the HAPI server package (skipping tests, using 'boot' profile)
# This creates the runnable WAR file in the 'target/' directory
mvn clean package -DskipTests=true -Pboot
# Return to the parent directory (or your project root)
cd ..
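Before moving on, it can help to confirm the build produced the bootable WAR referenced later in this guide (path taken from the steps above):

```bash
# The runnable WAR used in Option B below should now exist
ls -lh hapi-fhir-jpaserver/target/ROOT.war
```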
### 2. Build the Rest of the Application (Docker)
Next, build the Docker images for the remaining parts of the application as defined in your docker-compose.yml file. Run this command from the root directory where your docker-compose.yml file is located.
# Step 3: Build Docker images without using cache
docker-compose build --no-cache
## Running the Application
### Option A: Running the Full Application (Recommended)
Use Docker Compose to start all services, including the HAPI FHIR server if it is configured in your docker-compose.yml. Run this from the root directory containing your docker-compose.yml.
# Step 4: Start all services defined in docker-compose.yml in detached mode
docker-compose up -d
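Once the stack is up, a quick sanity check can confirm the FHIR server is reachable. The commands below assume the HAPI FHIR container publishes port 8080 on the host, matching the bundled application.yaml; adjust the host/port if your docker-compose.yml maps them differently.

```bash
# List the running services
docker-compose ps

# Actuator health probe (only /actuator/health is exposed by default)
curl -s http://localhost:8080/actuator/health

# Fetch the server's CapabilityStatement from the /fhir endpoint
curl -s http://localhost:8080/fhir/metadata | head -c 500
```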
### Option B: Running the HAPI FHIR Server Standalone (Debugging Only)
This method runs only the HAPI FHIR server directly using the built WAR file. Use this primarily for debugging the server in isolation.
# Navigate into the HAPI server directory where you built it
cd hapi-fhir-jpaserver
# Run the WAR file directly using Java
java -jar target/ROOT.war
# Note: You might need to configure ports or database connections
# separately when running this way, depending on the application's needs.
# Remember to navigate back when done
# cd ..
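Because the boot profile produces an executable Spring Boot WAR, standard Spring Boot command-line overrides should work here; the values below are illustrative only (a sketch, not verified against this build):

```bash
# Hypothetical example: run on a different port and point at a local H2 file
java -jar target/ROOT.war \
  --server.port=8081 \
  --spring.datasource.url="jdbc:h2:file:./target/database/h2;DB_CLOSE_DELAY=-1"
```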
## Useful Docker Commands
Here are some helpful commands for interacting with your running Docker containers:
**Copying files from a container:**
To copy a file from a running container to your local machine's current directory:
# Syntax: docker cp <CONTAINER_ID_OR_NAME>:<PATH_IN_CONTAINER> <LOCAL_DESTINATION_PATH>
docker cp <CONTAINER_ID>:/app/PATH/Filename.ext .
(Replace <CONTAINER_ID> and /app/PATH/Filename.ext with actual values; `.` refers to the current directory on your host machine.)
**Accessing a container's shell:**
To get an interactive bash shell inside a running container:
# Syntax: docker exec -it <CONTAINER_ID_OR_NAME> bash
docker exec -it <CONTAINER_ID> bash
(Replace <CONTAINER_ID> with the actual container ID or name. You can find this using docker ps.)
**Viewing running containers:**
docker ps
**Viewing application logs:**
# Follow logs for all services
docker-compose logs -f
# Follow logs for a specific service
docker-compose logs -f <SERVICE_NAME>
(Replace <SERVICE_NAME> with the name defined in your docker-compose.yml)
**Stopping the application:**
To stop the services started with docker-compose up -d:
docker-compose down
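If you also want to discard the data volumes (for example, to wipe the persisted H2 database), Docker Compose supports a `-v` flag; this assumes your docker-compose.yml declares volumes, and it permanently deletes their contents:

```bash
# Stop services and remove containers, networks, and volumes
docker-compose down -v
```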

hapi-fhir-Setup/target/classes/application.yaml (new file, 342 lines)

@@ -0,0 +1,342 @@
#Uncomment the "servlet" and "context-path" lines below to make the fhir endpoint available at /example/path/fhir instead of the default value of /fhir
server:
# servlet:
# context-path: /example/path
port: 8080
#Adds the option to go to eg. http://localhost:8080/actuator/health for seeing the running configuration
#see https://docs.spring.io/spring-boot/docs/current/reference/html/actuator.html#actuator.endpoints
management:
#The following configuration will enable the actuator endpoints at /actuator/health, /actuator/info, /actuator/prometheus, /actuator/metrics. For security purposes, only /actuator/health is enabled by default.
endpoints:
enabled-by-default: false
web:
exposure:
include: 'health' # or e.g. 'info,health,prometheus,metrics' or '*' for all
endpoint:
info:
enabled: true
metrics:
enabled: true
health:
enabled: true
probes:
enabled: true
group:
liveness:
include:
- livenessState
- readinessState
prometheus:
enabled: true
prometheus:
metrics:
export:
enabled: true
spring:
main:
allow-circular-references: true
flyway:
enabled: false
baselineOnMigrate: true
fail-on-missing-locations: false
datasource:
#url: 'jdbc:h2:file:./target/database/h2'
url: jdbc:h2:file:/app/h2-data/fhir;DB_CLOSE_DELAY=-1;AUTO_SERVER=TRUE
#url: jdbc:h2:mem:test_mem
username: sa
password: null
driverClassName: org.h2.Driver
max-active: 15
# database connection pool size
hikari:
maximum-pool-size: 10
jpa:
properties:
hibernate.format_sql: false
hibernate.show_sql: false
#Hibernate dialect is automatically detected except Postgres and H2.
#If using H2, then supply the value of ca.uhn.fhir.jpa.model.dialect.HapiFhirH2Dialect
#If using postgres, then supply the value of ca.uhn.fhir.jpa.model.dialect.HapiFhirPostgresDialect
hibernate.dialect: ca.uhn.fhir.jpa.model.dialect.HapiFhirH2Dialect
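# Example only (hypothetical values): to switch to Postgres, change the
# spring.datasource block above, e.g.
#   url: jdbc:postgresql://localhost:5432/hapi
#   driverClassName: org.postgresql.Driver
#   username: hapi
#   password: <secret>
# and use the Postgres dialect mentioned above instead:
# hibernate.dialect: ca.uhn.fhir.jpa.model.dialect.HapiFhirPostgresDialect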
# hibernate.hbm2ddl.auto: update
# hibernate.jdbc.batch_size: 20
# hibernate.cache.use_query_cache: false
# hibernate.cache.use_second_level_cache: false
# hibernate.cache.use_structured_entries: false
# hibernate.cache.use_minimal_puts: false
### These settings will enable fulltext search with lucene or elastic
hibernate.search.enabled: false
### lucene parameters
# hibernate.search.backend.type: lucene
# hibernate.search.backend.analysis.configurer: ca.uhn.fhir.jpa.search.HapiHSearchAnalysisConfigurers$HapiLuceneAnalysisConfigurer
# hibernate.search.backend.directory.type: local-filesystem
# hibernate.search.backend.directory.root: target/lucenefiles
# hibernate.search.backend.lucene_version: lucene_current
### elastic parameters ===> see also elasticsearch section below <===
# hibernate.search.backend.type: elasticsearch
# hibernate.search.backend.analysis.configurer: ca.uhn.fhir.jpa.search.HapiHSearchAnalysisConfigurers$HapiElasticAnalysisConfigurer
hapi:
fhir:
### When set to true, this flag makes the evaluate measure operations from the CR module available.
### The flag is false by default and can be overridden with a command-line argument.
cr:
enabled: false
caregaps:
reporter: "default"
section_author: "default"
cql:
use_embedded_libraries: true
compiler:
### These are low-level compiler options.
### They are not typically needed by most users.
# validate_units: true
# verify_only: false
# compatibility_level: "1.5"
error_level: Info
signature_level: All
# analyze_data_requirements: false
# collapse_data_requirements: false
# translator_format: JSON
# enable_date_range_optimization: true
enable_annotations: true
enable_locators: true
enable_results_type: true
enable_detailed_errors: true
# disable_list_traversal: false
# disable_list_demotion: false
# enable_interval_demotion: false
# enable_interval_promotion: false
# disable_method_invocation: false
# require_from_keyword: false
# disable_default_model_info_load: false
runtime:
debug_logging_enabled: false
# enable_validation: false
# enable_expression_caching: true
terminology:
valueset_preexpansion_mode: REQUIRE # USE_IF_PRESENT, REQUIRE, IGNORE
valueset_expansion_mode: PERFORM_NAIVE_EXPANSION # AUTO, USE_EXPANSION_OPERATION, PERFORM_NAIVE_EXPANSION
valueset_membership_mode: USE_EXPANSION # AUTO, USE_VALIDATE_CODE_OPERATION, USE_EXPANSION
code_lookup_mode: USE_VALIDATE_CODE_OPERATION # AUTO, USE_VALIDATE_CODE_OPERATION, USE_CODESYSTEM_URL
data:
search_parameter_mode: USE_SEARCH_PARAMETERS # AUTO, USE_SEARCH_PARAMETERS, FILTER_IN_MEMORY
terminology_parameter_mode: FILTER_IN_MEMORY # AUTO, USE_VALUE_SET_URL, USE_INLINE_CODES, FILTER_IN_MEMORY
profile_mode: DECLARED # ENFORCED, DECLARED, OPTIONAL, TRUST, OFF
cdshooks:
enabled: false
clientIdHeaderName: client_id
### This enables the swagger-ui at /fhir/swagger-ui/index.html as well as the /fhir/api-docs (see https://hapifhir.io/hapi-fhir/docs/server_plain/openapi.html)
openapi_enabled: true
### This is the FHIR version. Choose between DSTU2, DSTU3, R4 or R5
fhir_version: R4
### Flag is false by default. This flag enables runtime installation of IGs.
ig_runtime_upload_enabled: false
### enable to use the ApacheProxyAddressStrategy which uses X-Forwarded-* headers
### to determine the FHIR server address
# use_apache_address_strategy: false
### forces the use of the https:// protocol for the returned server address.
### alternatively, it may be set using the X-Forwarded-Proto header.
# use_apache_address_strategy_https: false
### enables the server to overwrite defaults on HTML, css, etc. under the url pattern of eg. /content/custom **
### Folder with custom content MUST be named custom. If omitted then default content applies
custom_content_path: ./custom
### enables the server to host custom content. If e.g. the value ./configs/app is supplied then the content
### will be served under /web/app
#app_content_path: ./configs/app
### enable to set the Server URL
# server_address: http://hapi.fhir.org/baseR4
# defer_indexing_for_codesystems_of_size: 101
### Flag is true by default. This flag filters resources during package installation, allowing only those resources with a valid status (e.g. active) to be installed.
# validate_resource_status_for_package_upload: false
# install_transitive_ig_dependencies: true
#implementationguides:
### example from registry (packages.fhir.org)
# swiss:
# name: swiss.mednet.fhir
# version: 0.8.0
# reloadExisting: false
# installMode: STORE_AND_INSTALL
# example not from registry
# ips_1_0_0:
# packageUrl: https://build.fhir.org/ig/HL7/fhir-ips/package.tgz
# name: hl7.fhir.uv.ips
# version: 1.0.0
# supported_resource_types:
# - Patient
# - Observation
##################################################
# Allowed Bundle Types for persistence (defaults are: COLLECTION,DOCUMENT,MESSAGE)
##################################################
# allowed_bundle_types: COLLECTION,DOCUMENT,MESSAGE,TRANSACTION,TRANSACTIONRESPONSE,BATCH,BATCHRESPONSE,HISTORY,SEARCHSET
# allow_cascading_deletes: true
# allow_contains_searches: true
# allow_external_references: true
# allow_multiple_delete: true
# allow_override_default_search_params: true
# auto_create_placeholder_reference_targets: false
# mass_ingestion_mode_enabled: false
### tells the server to automatically append the current version of the target resource to references at these paths
# auto_version_reference_at_paths: Device.patient, Device.location, Device.parent, DeviceMetric.parent, DeviceMetric.source, Observation.device, Observation.subject
# ips_enabled: false
# default_encoding: JSON
# default_pretty_print: true
# default_page_size: 20
# delete_expunge_enabled: true
# enable_repository_validating_interceptor: true
# enable_index_missing_fields: false
# enable_index_of_type: true
# enable_index_contained_resource: false
# upliftedRefchains_enabled: true
# resource_dbhistory_enabled: false
### !!Extended Lucene/Elasticsearch Indexing is still an experimental feature, expect some features (e.g. _total=accurate) to not work as expected!!
### more information here: https://hapifhir.io/hapi-fhir/docs/server_jpa/elastic.html
advanced_lucene_indexing: false
bulk_export_enabled: false
bulk_import_enabled: false
# language_search_parameter_enabled: true
# This is an experimental feature, and does not fully support _total and other FHIR features.
# enforce_referential_integrity_on_delete: false
# enforce_referential_integrity_on_write: false
# etag_support_enabled: true
# expunge_enabled: true
# client_id_strategy: ALPHANUMERIC
# server_id_strategy: SEQUENTIAL_NUMERIC
# fhirpath_interceptor_enabled: false
# filter_search_enabled: true
# graphql_enabled: true
narrative_enabled: true
mdm_enabled: false
mdm_rules_json_location: "mdm-rules.json"
## see: https://hapifhir.io/hapi-fhir/docs/interceptors/built_in_server_interceptors.html#jpa-server-retry-on-version-conflicts
# userRequestRetryVersionConflictsInterceptorEnabled : false
# local_base_urls:
# - https://hapi.fhir.org/baseR4
# pre_expand_value_sets: true
# enable_task_pre_expand_value_sets: true
# pre_expand_value_sets_default_count: 1000
# pre_expand_value_sets_max_count: 1000
# maximum_expansion_size: 1000
logical_urls:
- http://terminology.hl7.org/*
- https://terminology.hl7.org/*
- http://snomed.info/*
- https://snomed.info/*
- http://unitsofmeasure.org/*
- https://unitsofmeasure.org/*
- http://loinc.org/*
- https://loinc.org/*
# partitioning:
# allow_references_across_partitions: false
# partitioning_include_in_search_hashes: false
# conditional_create_duplicate_identifiers_enabled: false
cors:
allow_Credentials: true
# These are allowed_origin patterns, see: https://docs.spring.io/spring-framework/docs/current/javadoc-api/org/springframework/web/cors/CorsConfiguration.html#setAllowedOriginPatterns-java.util.List-
allowed_origin:
- '*'
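# Example (hypothetical origins): instead of '*', the patterns can be restricted, e.g.
#   allowed_origin:
#     - 'https://myapp.example.org'
#     - 'http://localhost:3000'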
# Search coordinator thread pool sizes
search-coord-core-pool-size: 20
search-coord-max-pool-size: 100
search-coord-queue-capacity: 200
# Search Prefetch Thresholds.
# This setting sets the number of search results to prefetch. For example, if this list
# is set to [100, 1000, -1] then the server will initially load 100 results and not
# attempt to load more. If the user requests subsequent page(s) of results and goes
# past 100 results, the system will load the next 900 (up to the following threshold of 1000).
# The system will progressively work through these thresholds.
# A threshold of -1 means to load all results. Note that if the final threshold is a
# number other than -1, the system will never prefetch more than the given number.
search_prefetch_thresholds: 13,503,2003,-1
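# Applied to the values above: the server first loads 13 results; paging past 13
# loads up to 503; paging past 503 loads up to 2003; past 2003 the -1 threshold
# loads all remaining results.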
# comma-separated package names, will be @ComponentScan'ed by Spring to allow for creating custom Spring beans
#custom-bean-packages:
# comma-separated list of fully qualified interceptor classes.
# classes listed here will be fetched from the Spring context when combined with 'custom-bean-packages',
# or will be instantiated via reflection using a no-arg constructor; then registered with the server
#custom-interceptor-classes:
# comma-separated list of fully qualified provider classes.
# classes listed here will be fetched from the Spring context when combined with 'custom-bean-packages',
# or will be instantiated via reflection using a no-arg constructor; then registered with the server
#custom-provider-classes:
# Threadpool size for BATCH'ed GETs in a bundle.
# bundle_batch_pool_size: 10
# bundle_batch_pool_max_size: 50
# logger:
# error_format: 'ERROR - ${requestVerb} ${requestUrl}'
# format: >-
# Path[${servletPath}] Source[${requestHeader.x-forwarded-for}]
# Operation[${operationType} ${operationName} ${idOrResourceName}]
# UA[${requestHeader.user-agent}] Params[${requestParameters}]
# ResponseEncoding[${responseEncodingNoDefault}]
# log_exceptions: true
# name: fhirtest.access
# max_binary_size: 104857600
# max_page_size: 200
# retain_cached_searches_mins: 60
# reuse_cached_search_results_millis: 60000
tester:
home:
name: FHIRFLARE Tester
server_address: http://localhost:8080/fhir
refuse_to_fetch_third_party_urls: false
fhir_version: R4
global:
name: Global Tester
server_address: "http://hapi.fhir.org/baseR4"
refuse_to_fetch_third_party_urls: false
fhir_version: R4
# validation:
# requests_enabled: true
# responses_enabled: true
# binary_storage_enabled: true
inline_resource_storage_below_size: 4000
# bulk_export_enabled: true
# subscription:
# resthook_enabled: true
# websocket_enabled: false
# polling_interval_ms: 5000
# immediately_queued: false
# email:
# from: some@test.com
# host: google.com
# port:
# username:
# password:
# auth:
# startTlsEnable:
# startTlsRequired:
# quitWait:
# lastn_enabled: true
# store_resource_in_lucene_index_enabled: true
### This is the configuration for the normalized quantity search level. The default is 0.
### 0: NORMALIZED_QUANTITY_SEARCH_NOT_SUPPORTED - default
### 1: NORMALIZED_QUANTITY_STORAGE_SUPPORTED
### 2: NORMALIZED_QUANTITY_SEARCH_SUPPORTED
# normalized_quantity_search_level: 2
#elasticsearch:
# debug:
# pretty_print_json_log: false
# refresh_after_write: false
# enabled: false
# password: SomePassword
# required_index_status: YELLOW
# rest_url: 'localhost:9200'
# protocol: 'http'
# schema_management_strategy: CREATE
# username: SomeUsername