Delete hapi-fhir-jpaserver directory

Joshua Hare 2025-04-17 17:35:30 +10:00 committed by GitHub
parent dc21d202c7
commit 39e713a07f
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
34 changed files with 0 additions and 3083 deletions

@@ -1,50 +0,0 @@
FROM docker.io/library/maven:3.9.9-eclipse-temurin-17 AS build-hapi
WORKDIR /tmp/hapi-fhir-jpaserver-starter
ARG OPENTELEMETRY_JAVA_AGENT_VERSION=1.33.3
RUN curl -LSsO https://github.com/open-telemetry/opentelemetry-java-instrumentation/releases/download/v${OPENTELEMETRY_JAVA_AGENT_VERSION}/opentelemetry-javaagent.jar
COPY pom.xml .
COPY server.xml .
RUN mvn -ntp dependency:go-offline
COPY src/ /tmp/hapi-fhir-jpaserver-starter/src/
RUN mvn clean install -DskipTests -Djdk.lang.Process.launchMechanism=vfork
FROM build-hapi AS build-distroless
RUN mvn package -DskipTests spring-boot:repackage -Pboot
RUN mkdir /app && cp /tmp/hapi-fhir-jpaserver-starter/target/ROOT.war /app/main.war
########### bitnami tomcat version is suitable for debugging and comes with a shell
########### it can be built using eg. `docker build --target tomcat .`
FROM bitnami/tomcat:10.1 AS tomcat
USER root
RUN rm -rf /opt/bitnami/tomcat/webapps/ROOT && \
    mkdir -p /opt/bitnami/hapi/data/hapi/lucenefiles && \
    chown -R 1001:1001 /opt/bitnami/hapi/data/hapi/lucenefiles && \
    chmod 775 /opt/bitnami/hapi/data/hapi/lucenefiles
RUN mkdir -p /target && chown -R 1001:1001 target
USER 1001
COPY --chown=1001:1001 catalina.properties /opt/bitnami/tomcat/conf/catalina.properties
COPY --chown=1001:1001 server.xml /opt/bitnami/tomcat/conf/server.xml
COPY --from=build-hapi --chown=1001:1001 /tmp/hapi-fhir-jpaserver-starter/target/ROOT.war /opt/bitnami/tomcat/webapps/ROOT.war
COPY --from=build-hapi --chown=1001:1001 /tmp/hapi-fhir-jpaserver-starter/opentelemetry-javaagent.jar /app
ENV ALLOW_EMPTY_PASSWORD=yes
########### distroless brings focus on security and runs on plain spring boot - this is the default image
FROM gcr.io/distroless/java17-debian12:nonroot AS default
# 65532 is the nonroot user's uid
# used here instead of the name to allow Kubernetes to easily detect that the container
# is running as a non-root (uid != 0) user.
USER 65532:65532
WORKDIR /app
COPY --chown=nonroot:nonroot --from=build-distroless /app /app
COPY --chown=nonroot:nonroot --from=build-hapi /tmp/hapi-fhir-jpaserver-starter/opentelemetry-javaagent.jar /app
ENTRYPOINT ["java", "--class-path", "/app/main.war", "-Dloader.path=main.war!/WEB-INF/classes/,main.war!/WEB-INF/,/app/extra-classes", "org.springframework.boot.loader.PropertiesLauncher"]

@@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

@@ -1,591 +0,0 @@
# HAPI-FHIR Starter Project
This project is a complete starter project you can use to deploy a FHIR server using HAPI FHIR JPA.
Note that this project is specifically intended for end users of the HAPI FHIR JPA server module (in other words, it helps you implement HAPI FHIR, it is not the source of the library itself). If you are looking for the main HAPI FHIR project, see here: https://github.com/hapifhir/hapi-fhir
While this project shows how you can use many parts of the HAPI FHIR framework, be aware that a number of features are missing and must be supplied by you (or covered by professional support) before using it directly in production:
1) The service comes with no security implementation. See how it can be done [here](https://hapifhir.io/hapi-fhir/docs/security/introduction.html)
2) The service comes with no enterprise logging. See how it can be done [here](https://hapifhir.io/hapi-fhir/docs/security/balp_interceptor.html)
3) The internal topic cache used by subscriptions in HAPI FHIR is not shared across multiple instances, as the [default supplied implementation is in-memory](https://github.com/hapifhir/hapi-fhir/blob/master/hapi-fhir-jpaserver-subscription/src/main/java/ca/uhn/fhir/jpa/topic/ActiveSubscriptionTopicCache.java)
4) The internal message broker channel in HAPI FHIR is not shared across multiple instances, as the [default supplied implementation is in-memory](https://github.com/hapifhir/hapi-fhir/blob/master/hapi-fhir-storage/src/main/java/ca/uhn/fhir/jpa/subscription/channel/api/IChannelFactory.java). This impacts the use of the modules listed [here](https://smilecdr.com/docs/installation/message_broker.html#modules-dependent-on-message-brokers)
Need Help? Please see: https://github.com/hapifhir/hapi-fhir/wiki/Getting-Help
## Prerequisites
In order to use this sample, you should have:
- [This project](https://github.com/hapifhir/hapi-fhir-jpaserver-starter) checked out. You may wish to create a GitHub Fork of the project and check that out instead so that you can customize the project and save the results to GitHub.
### and either
- Oracle Java (JDK) installed: JDK 17 or newer.
- Apache Maven build tool (newest version)
### or
- Docker, as the entire project can be built using multistage docker (with both JDK and maven wrapped in docker) or used directly from [Docker Hub](https://hub.docker.com/r/hapiproject/hapi)
## Running via [Docker Hub](https://hub.docker.com/r/hapiproject/hapi)
Each tagged/released version of `hapi-fhir-jpaserver` is built as a Docker image and published to Docker Hub. To run the published Docker image from Docker Hub:
```
docker pull hapiproject/hapi:latest
docker run -p 8080:8080 hapiproject/hapi:latest
```
This will run the docker image with the default configuration, mapping port 8080 from the container to port 8080 in the host. Once running, you can access `http://localhost:8080/` in the browser to access the HAPI FHIR server's UI or use `http://localhost:8080/fhir/` as the base URL for your REST requests.
If you change the mapped port, you need to change the configuration used by HAPI to have the correct `hapi.fhir.tester` property/value.
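For example, if the server is published on host port 8888, a minimal sketch of the corresponding override (assuming the tester block lives under `hapi.fhir` as in the overlay examples later in this README) would be:
```yaml
hapi:
  fhir:
    tester:
      - id: home
        name: Local Tester
        server_address: 'http://localhost:8888/fhir'
        refuse_to_fetch_third_party_urls: false
        fhir_version: R4
```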
### Configuration via environment variables
You can customize HAPI directly from the `run` command using environment variables. For example:
```
docker run -p 8080:8080 -e hapi.fhir.default_encoding=xml hapiproject/hapi:latest
```
HAPI reads environment variables as overrides for the properties whose defaults are defined in the [application.yaml](https://github.com/hapifhir/hapi-fhir-jpaserver-starter/blob/master/src/main/resources/application.yaml) file.
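As a sketch, the same mechanism can carry several overrides at once; the values below simply mirror the PostgreSQL settings shown later in this README and are examples only:
```
docker run -p 8080:8080 \
  -e spring.datasource.url='jdbc:postgresql://db:5432/hapi' \
  -e spring.datasource.username=admin \
  -e spring.datasource.password=admin \
  -e spring.datasource.driverClassName=org.postgresql.Driver \
  -e spring.jpa.properties.hibernate.dialect=ca.uhn.fhir.jpa.model.dialect.HapiFhirPostgresDialect \
  hapiproject/hapi:latest
```
This assumes a PostgreSQL instance reachable as `db:5432` with those credentials.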
### Configuration via overridden application.yaml file and using Docker
You can customize HAPI by telling HAPI to look for the configuration file in a different location, e.g.:
```
docker run -p 8090:8080 -v $(pwd)/yourLocalFolder:/configs -e "--spring.config.location=file:///configs/another.application.yaml" hapiproject/hapi:latest
```
Here, the configuration file (*another.application.yaml*) is placed locally in the folder *yourLocalFolder*.
```
docker run -p 8090:8080 -e "--spring.config.location=classpath:/another.application.yaml" hapiproject/hapi:latest
```
Here, the configuration file (*another.application.yaml*) is part of the compiled set of resources.
### Example using ``docker-compose.yml`` for docker-compose
```yaml
version: '3.7'
services:
  fhir:
    container_name: fhir
    image: "hapiproject/hapi:latest"
    ports:
      - "8080:8080"
    configs:
      - source: hapi
        target: /app/config/application.yaml
    depends_on:
      - db
  db:
    image: postgres
    restart: always
    environment:
      POSTGRES_PASSWORD: admin
      POSTGRES_USER: admin
      POSTGRES_DB: hapi
    volumes:
      - ./hapi.postgress.data:/var/lib/postgresql/data
configs:
  hapi:
    file: ./hapi.application.yaml
```
Provide the following content in ``./hapi.application.yaml``:
```yaml
spring:
  datasource:
    url: 'jdbc:postgresql://db:5432/hapi'
    username: admin
    password: admin
    driverClassName: org.postgresql.Driver
  jpa:
    properties:
      hibernate.dialect: ca.uhn.fhir.jpa.model.dialect.HapiFhirPostgresDialect
      hibernate.search.enabled: false
```
### Example running custom interceptor using docker-compose
This example is an extension of the above one, now adding a custom interceptor.
```yaml
version: '3.7'
services:
  fhir:
    container_name: fhir
    image: "hapiproject/hapi:latest"
    ports:
      - "8080:8080"
    configs:
      - source: hapi
        target: /app/config/application.yaml
      - source: hapi-extra-classes
        target: /app/extra-classes
    depends_on:
      - db
  db:
    image: postgres
    restart: always
    environment:
      POSTGRES_PASSWORD: admin
      POSTGRES_USER: admin
      POSTGRES_DB: hapi
    volumes:
      - ./hapi.postgress.data:/var/lib/postgresql/data
configs:
  hapi:
    file: ./hapi.application.yaml
  hapi-extra-classes:
    file: ./hapi-extra-classes
```
Provide the following content in ``./hapi.application.yaml``:
```yaml
spring:
  datasource:
    url: 'jdbc:postgresql://db:5432/hapi'
    username: admin
    password: admin
    driverClassName: org.postgresql.Driver
  jpa:
    properties:
      hibernate.dialect: ca.uhn.fhir.jpa.model.dialect.HapiFhirPostgresDialect
      hibernate.search.enabled: false
hapi:
  fhir:
    custom-bean-packages: the.package.containing.your.interceptor
    custom-interceptor-classes: the.package.containing.your.interceptor.YourInterceptor
```
The basic interceptor structure would be like this:
```java
package the.package.containing.your.interceptor;

import org.hl7.fhir.instance.model.api.IBaseResource;
import org.springframework.stereotype.Component;

import ca.uhn.fhir.interceptor.api.Hook;
import ca.uhn.fhir.interceptor.api.Interceptor;
import ca.uhn.fhir.interceptor.api.Pointcut;

@Component
@Interceptor
public class YourInterceptor {

    @Hook(Pointcut.STORAGE_PRECOMMIT_RESOURCE_CREATED)
    public void resourceCreated(IBaseResource newResource) {
        System.out.println("YourInterceptor.resourceCreated");
    }
}
```
## Running locally
The easiest way to run this server entirely depends on your environment requirements. The following ways are supported:
### Using jetty
```bash
mvn -Pjetty spring-boot:run
```
The Server will then be accessible at http://localhost:8080/fhir and the CapabilityStatement will be found at http://localhost:8080/fhir/metadata.
### Using Spring Boot
```bash
mvn spring-boot:run
```
The Server will then be accessible at http://localhost:8080/fhir and the CapabilityStatement will be found at http://localhost:8080/fhir/metadata.
If you want to run this server on a different port, you can change the port in the `src/main/resources/application.yaml` file as follows:
```yaml
server:
#  servlet:
#    context-path: /example/path
  port: 8888
```
The Server will then be accessible at http://localhost:8888/fhir and the CapabilityStatement will be found at http://localhost:8888/fhir/metadata. Remember to adjust your overlay configuration in the `application.yaml` file to the following:
```yaml
tester:
  - id: home
    name: Local Tester
    server_address: 'http://localhost:8888/fhir'
    refuse_to_fetch_third_party_urls: false
    fhir_version: R4
```
### Using Spring Boot with :run
```bash
mvn clean spring-boot:run -Pboot
```
The server will then be accessible at http://localhost:8080/ and e.g. http://localhost:8080/fhir/metadata. Remember to adjust your overlay configuration in the application.yaml to the following:
```yaml
tester:
  - id: home
    name: Local Tester
    server_address: 'http://localhost:8080/fhir'
    refuse_to_fetch_third_party_urls: false
    fhir_version: R4
```
### Using Spring Boot and the repackaged WAR
```bash
mvn clean package spring-boot:repackage -DskipTests=true -Pboot && java -jar target/ROOT.war
```
The server will then be accessible at http://localhost:8080/ and e.g. http://localhost:8080/fhir/metadata. Remember to adjust your overlay configuration in the application.yaml to the following:
```yaml
tester:
  - id: home
    name: Local Tester
    server_address: 'http://localhost:8080/fhir'
    refuse_to_fetch_third_party_urls: false
    fhir_version: R4
```
### Using Spring Boot and Google distroless
```bash
mvn clean package com.google.cloud.tools:jib-maven-plugin:dockerBuild -Dimage=distroless-hapi && docker run -p 8080:8080 distroless-hapi
```
The server will then be accessible at http://localhost:8080/ and e.g. http://localhost:8080/fhir/metadata. Remember to adjust your overlay configuration in the application.yaml to the following:
```yaml
tester:
  - id: home
    name: Local Tester
    server_address: 'http://localhost:8080/fhir'
    refuse_to_fetch_third_party_urls: false
    fhir_version: R4
```
### Using the Dockerfile and multistage build
```bash
./build-docker-image.sh && docker run -p 8080:8080 hapi-fhir/hapi-fhir-jpaserver-starter:latest
```
The server will then be accessible at http://localhost:8080/ and e.g. http://localhost:8080/fhir/metadata. Remember to adjust your overlay configuration in the application.yaml to the following:
```yaml
tester:
  - id: home
    name: Local Tester
    server_address: 'http://localhost:8080/fhir'
    refuse_to_fetch_third_party_urls: false
    fhir_version: R4
```
## Configurations
Much of this HAPI starter project can be configured using the yaml file in _src/main/resources/application.yaml_. By default, this starter project is configured to use H2 as the database.
### MySQL configuration
The HAPI FHIR JPA Server does not support MySQL, as MySQL support has been deprecated.
See more at https://hapifhir.io/hapi-fhir/docs/server_jpa/database_support.html
### PostgreSQL configuration
To configure the starter app to use PostgreSQL, instead of the default H2, update the application.yaml file to have the following:
```yaml
spring:
  datasource:
    url: 'jdbc:postgresql://localhost:5432/hapi'
    username: admin
    password: admin
    driverClassName: org.postgresql.Driver
  jpa:
    properties:
      hibernate.dialect: ca.uhn.fhir.jpa.model.dialect.HapiFhirPostgresDialect
      hibernate.search.enabled: false
      # Then comment all hibernate.search.backend.*
```
Because the integration tests within the project rely on the default H2 database configuration, it is important to either explicitly skip the integration tests during the build process, i.e., `mvn install -DskipTests`, or delete the tests altogether. Failure to skip or delete the tests once you've configured PostgreSQL for the datasource.driver, datasource.url, and hibernate.dialect as outlined above will result in build errors and compilation failure.
### Microsoft SQL Server configuration
To configure the starter app to use MS SQL Server, instead of the default H2, update the application.yaml file to have the following:
```yaml
spring:
  datasource:
    url: 'jdbc:sqlserver://<server>:<port>;databaseName=<databasename>'
    username: admin
    password: admin
    driverClassName: com.microsoft.sqlserver.jdbc.SQLServerDriver
```
Also, make sure you are not setting the Hibernate dialect explicitly, in other words, remove any lines similar to:
```
hibernate.dialect: {some non-Microsoft SQL dialect}
```
Because the integration tests within the project rely on the default H2 database configuration, it is important to either explicitly skip the integration tests during the build process, i.e., `mvn install -DskipTests`, or delete the tests altogether. Failure to skip or delete the tests once you've configured MS SQL Server for the datasource.driverClassName and datasource.url as outlined above will result in build errors and compilation failure.
NOTE: MS SQL Server by default uses a case-insensitive codepage. This will cause errors with some operations - such as when expanding case-sensitive valuesets (UCUM) as there are unique indexes defined on the terminology tables for codes.
It is recommended to deploy a case-sensitive database prior to running HAPI FHIR when using MS SQL Server to avoid these and potentially other issues.
## Adding custom interceptors
Custom interceptors can be registered with the server by including the property `hapi.fhir.custom-interceptor-classes`. This takes a comma-separated list of fully-qualified class names which will be registered with the server (see the configuration sketch below).
Interceptors will be discovered in one of two ways:
1) discovered from the Spring application context as existing Beans (can be used in conjunction with `hapi.fhir.custom-bean-packages`) or registered with Spring via other methods
or
2) classes will be instantiated via reflection if no matching Bean is found
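A minimal sketch of the corresponding `application.yaml` fragment (package and class names are placeholders, not part of this project):
```yaml
hapi:
  fhir:
    # Optional: packages to scan for Spring beans such as @Component interceptors (placeholder package)
    custom-bean-packages: com.example.fhir.interceptors
    # Comma-separated, fully-qualified interceptor class names (placeholder classes)
    custom-interceptor-classes: com.example.fhir.interceptors.AuditInterceptor,com.example.fhir.interceptors.LoggingInterceptor
```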
## Adding custom operations (providers)
Custom operations (providers) can be registered with the server by including the property `hapi.fhir.custom-provider-classes`. This takes a comma-separated list of fully-qualified class names which will be registered with the server (see the configuration sketch below).
Providers will be discovered in one of two ways:
1) discovered from the Spring application context as existing Beans (can be used in conjunction with `hapi.fhir.custom-bean-packages`) or registered with Spring via other methods
or
2) classes will be instantiated via reflection if no matching Bean is found
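A minimal sketch of the corresponding `application.yaml` fragment (package and class names are placeholders):
```yaml
hapi:
  fhir:
    custom-bean-packages: com.example.fhir.providers
    custom-provider-classes: com.example.fhir.providers.HelloWorldProvider
```
The provider class itself only needs an `@Operation`-annotated method; see the sketch in the "Adding custom operations" section below.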
## Customizing The Web Testpage UI
The UI that comes with this server is an exact clone of the server available at [http://hapi.fhir.org](http://hapi.fhir.org). You may skin this UI if you'd like. For example, you might change the introductory text or replace the logo with your own.
The UI is customized using [Thymeleaf](https://www.thymeleaf.org/) template files. You might want to learn more about Thymeleaf, but you don't necessarily need to: they are quite easy to figure out.
Several template files that can be customized are found in the following directory: [https://github.com/hapifhir/hapi-fhir-jpaserver-starter/tree/master/src/main/webapp/WEB-INF/templates](https://github.com/hapifhir/hapi-fhir-jpaserver-starter/tree/master/src/main/webapp/WEB-INF/templates)
## Deploying to an Application Server
Using the Maven-Embedded Jetty method above is convenient, but it is not a good solution if you want to leave the server running in the background.
Most people who are using HAPI FHIR JPA as a server that is accessible to other people (whether internally on your network or publicly hosted) will do so using an Application Server, such as [Apache Tomcat](http://tomcat.apache.org/) or [Jetty](https://www.eclipse.org/jetty/). Note that any Servlet 3.0+ compatible Web Container will work (e.g. WildFly, WebSphere, etc.).
Tomcat is very popular, so it is a good choice simply because you will be able to find many tutorials online. Jetty is a great alternative due to its fast startup time and good overall performance.
To deploy to a container, you should first build the project:
```bash
mvn clean install
```
This will create a file called `ROOT.war` in your `target` directory. This should be installed in your Web Container according to the instructions for your particular container. For example, if you are using Tomcat, you will want to copy this file to the `webapps/` directory.
Again, browse to the following link to use the server (note that the port 8080 may not be correct depending on how your server is configured).
[http://localhost:8080/](http://localhost:8080/)
You will then be able to access the JPA server e.g. using http://localhost:8080/fhir/metadata.
If you would like it to be hosted at e.g. hapi-fhir-jpaserver, i.e. http://localhost:8080/hapi-fhir-jpaserver/ or http://localhost:8080/hapi-fhir-jpaserver/fhir/metadata, then rename the WAR file to ```hapi-fhir-jpaserver.war``` and adjust the overlay configuration accordingly, e.g.:
```yaml
tester:
  - id: home
    name: Local Tester
    server_address: 'http://localhost:8080/hapi-fhir-jpaserver/fhir'
    refuse_to_fetch_third_party_urls: false
    fhir_version: R4
```
## Deploy with docker compose
Docker compose is a simple option to build and deploy containers. To deploy with docker compose, you should build the project
with `mvn clean install` and then bring up the containers with `docker-compose up -d --build`. The server can be
reached at http://localhost:8080/.
In order to use another port, change the `ports` parameter
inside `docker-compose.yml` to `8888:8080`, where 8888 is a port of your choice.
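For example, a sketch of the relevant `docker-compose.yml` fragment (service name shown as in the compose examples above; 8888 is an arbitrary host port):
```yaml
services:
  fhir:
    ports:
      - "8888:8080"
```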
The docker compose setup also includes a PostgreSQL database. If you choose to use PostgreSQL instead of H2, change the following properties in `src/main/resources/application.yaml`:
```yaml
spring:
  datasource:
    url: 'jdbc:postgresql://hapi-fhir-postgres:5432/hapi'
    username: admin
    password: admin
    driverClassName: org.postgresql.Driver
  jpa:
    properties:
      hibernate.dialect: ca.uhn.fhir.jpa.model.dialect.HapiFhirPostgresDialect
      hibernate.search.enabled: false
      # Then comment all hibernate.search.backend.*
```
## Running hapi-fhir-jpaserver directly from IntelliJ as Spring Boot
Make sure you run with the maven profile called ```boot``` and NOT also ```jetty```. You can then run or debug the project directly, without any extra application server.
## Running hapi-fhir-jpaserver-example in Tomcat from IntelliJ
Install Tomcat.
Make sure you have Tomcat set up in IntelliJ.
- File->Settings->Build, Execution, Deployment->Application Servers
- Click +
- Select "Tomcat Server"
- Enter the path to your tomcat deployment for both Tomcat Home (IntelliJ will fill in base directory for you)
Add a Run Configuration for running hapi-fhir-jpaserver-example under Tomcat
- Run->Edit Configurations
- Click the green +
- Select Tomcat Server, Local
- Change the name to whatever you wish
- Uncheck the "After launch" checkbox
- On the "Deployment" tab, click the green +
- Select "Artifact"
- Select "hapi-fhir-jpaserver-example:war"
- In "Application context" type /hapi
Run the configuration.
- You should now have an "Application Servers" in the list of windows at the bottom.
- Click it.
- Select your server, and click the green triangle (or the bug if you want to debug)
- Wait for the console output to stop
Point your browser (or fiddler, or what have you) to `http://localhost:8080/hapi/baseDstu3/Patient`
## Enabling Subscriptions
The server may be configured with subscription support by enabling properties in the [application.yaml](https://github.com/hapifhir/hapi-fhir-jpaserver-starter/blob/master/src/main/resources/application.yaml) file (a combined example follows the list):
- `hapi.fhir.subscription.resthook_enabled` - Enables REST Hook subscriptions, where the server will make an outgoing connection to a remote REST server
- `hapi.fhir.subscription.email.*` - Enables email subscriptions. Note that you must also provide the connection details for a usable SMTP server.
- `hapi.fhir.subscription.websocket_enabled` - Enables websocket subscriptions. With this enabled, your server will accept incoming websocket connections on the following URL (this example uses the default context path and port, you may need to tweak depending on your deployment environment): [ws://localhost:8080/websocket](ws://localhost:8080/websocket)
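A minimal sketch enabling the REST Hook and websocket flavors (email subscriptions would additionally need the `hapi.fhir.subscription.email.*` SMTP settings):
```yaml
hapi:
  fhir:
    subscription:
      resthook_enabled: true
      websocket_enabled: true
```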
## Enabling Clinical Reasoning
Set `hapi.fhir.cr.enabled=true` in the [application.yaml](https://github.com/hapifhir/hapi-fhir-jpaserver-starter/blob/master/src/main/resources/application.yaml) file to enable [Clinical Quality Language](https://cql.hl7.org/) on this server. An alternate settings file, [cds.application.yaml](https://github.com/hapifhir/hapi-fhir-jpaserver-starter/blob/master/src/main/resources/cds.application.yaml), exists with the Clinical Reasoning module enabled and default settings that have been found to work with most CDS and dQM test cases.
## Enabling CDS Hooks
Set `hapi.fhir.cdshooks.enabled=true` in the [application.yaml](https://github.com/hapifhir/hapi-fhir-jpaserver-starter/blob/master/src/main/resources/application.yaml) file to enable [CDS Hooks](https://cds-hooks.org/) on this server. The Clinical Reasoning module must also be enabled because this implementation of CDS Hooks includes [CDS on FHIR](https://build.fhir.org/clinicalreasoning-cds-on-fhir.html). An example CDS Service using CDS on FHIR is available in the CdsHooksServletIT test class.
## Enabling MDM (EMPI)
Set `hapi.fhir.mdm_enabled=true` in the [application.yaml](https://github.com/hapifhir/hapi-fhir-jpaserver-starter/blob/master/src/main/resources/application.yaml) file to enable MDM on this server. The MDM matching rules are configured in [mdm-rules.json](https://github.com/hapifhir/hapi-fhir-jpaserver-starter/blob/master/src/main/resources/mdm-rules.json). The rules in this example file should be replaced with actual matching rules appropriate to your data. Note that MDM relies on subscriptions, so for MDM to work, subscriptions must be enabled.
## Using Elasticsearch
By default, the server will use embedded Lucene indexes for terminology and fulltext indexing purposes. You can switch this to using Elasticsearch by editing the properties in [application.yaml](https://github.com/hapifhir/hapi-fhir-jpaserver-starter/blob/master/src/main/resources/application.yaml)
For example:
```properties
elasticsearch.enabled=true
elasticsearch.rest_url=localhost:9200
elasticsearch.username=SomeUsername
elasticsearch.password=SomePassword
elasticsearch.protocol=http
elasticsearch.required_index_status=YELLOW
elasticsearch.schema_management_strategy=CREATE
```
## Enabling LastN
Set `hapi.fhir.lastn_enabled=true` in the [application.yaml](https://github.com/hapifhir/hapi-fhir-jpaserver-starter/blob/master/src/main/resources/application.yaml) file to enable the $lastn operation on this server. Note that the $lastn operation relies on Elasticsearch, so for $lastn to work, indexing must be enabled using Elasticsearch.
## Enabling Resource to be stored in Lucene Index
Set `hapi.fhir.store_resource_in_lucene_index_enabled` in the [application.yaml](https://github.com/hapifhir/hapi-fhir-jpaserver-starter/blob/master/src/main/resources/application.yaml) file to enable storing of resource json along with Lucene/Elasticsearch index mappings.
## Changing cached search results time
It is possible to change how long cached search results are reused. The option `reuse_cached_search_results_millis` in the [application.yaml](https://github.com/hapifhir/hapi-fhir-jpaserver-starter/blob/master/src/main/resources/application.yaml) is 6000 milliseconds by default.
Set `reuse_cached_search_results_millis: -1` in the [application.yaml](https://github.com/hapifhir/hapi-fhir-jpaserver-starter/blob/master/src/main/resources/application.yaml) file to disable reuse of cached search results, so that every search is executed fresh.
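A sketch of the override, assuming the option sits under `hapi.fhir` like the other starter settings:
```yaml
hapi:
  fhir:
    # -1 disables reuse of cached search results; a positive value is the reuse window in milliseconds
    reuse_cached_search_results_millis: -1
```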
## Build the distroless variant of the image (for lower footprint and improved security)
The default Dockerfile builds the distroless variant of the image in its final `default` stage,
which uses the `gcr.io/distroless/java17-debian12:nonroot` base image:
```sh
docker build --target=default -t hapi-fhir:distroless .
```
Note that distroless images are also automatically built and pushed to the container registry,
see the `-distroless` suffix in the image tags.
## Adding custom operations
To add a custom operation, refer to the documentation in the core hapi-fhir libraries [here](https://hapifhir.io/hapi-fhir/docs/server_plain/rest_operations_operations.html).
Within `hapi-fhir-jpaserver-starter`, create a generic class (that does not extend or implement any classes or interfaces), add the `@Operation` as a method within the generic class, and then register the class as a provider using `RestfulServer.registerProvider()`.
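Under those assumptions, a minimal provider sketch might look like the following (package, class, and operation names are placeholders; the example assumes the default R4 configuration):
```java
package com.example.fhir.providers;

import ca.uhn.fhir.rest.annotation.Operation;
import ca.uhn.fhir.rest.annotation.OperationParam;
import org.hl7.fhir.r4.model.Parameters;
import org.hl7.fhir.r4.model.StringType;

// A plain class: no base class or interface is required for an @Operation provider.
public class HelloWorldProvider {

    // Exposed as a server-level operation, e.g. POST [base]/$hello-world
    @Operation(name = "$hello-world", idempotent = true)
    public Parameters helloWorld(@OperationParam(name = "name") StringType name) {
        String who = (name != null && name.hasValue()) ? name.getValue() : "world";
        Parameters retVal = new Parameters();
        retVal.addParameter().setName("greeting").setValue(new StringType("Hello, " + who));
        return retVal;
    }
}
```
Register it either via `RestfulServer.registerProvider()` as described above, or through the `hapi.fhir.custom-provider-classes` property covered earlier.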
## Runtime package install
It's possible to install a FHIR Implementation Guide package (`package.tgz`) either from a published package or from a local package with the `$install` operation, without having to restart the server. This is available for R4 and R5.
This feature must be enabled in the application.yaml (or docker command line):
```yaml
hapi:
  fhir:
    ig_runtime_upload_enabled: true
```
The `$install` operation is triggered with a POST to `[server]/ImplementationGuide/$install`, with the payload below:
```json
{
  "resourceType": "Parameters",
  "parameter": [
    {
      "name": "npmContent",
      "valueBase64Binary": "[BASE64_ENCODED_NPM_PACKAGE_DATA]"
    }
  ]
}
```
## Enable OpenTelemetry auto-instrumentation
The container image includes the [OpenTelemetry Java auto-instrumentation](https://github.com/open-telemetry/opentelemetry-java-instrumentation)
Java agent JAR which can be used to export telemetry data for the HAPI FHIR JPA Server. You can enable it by specifying the `-javaagent` flag,
for example by overriding the `JAVA_TOOL_OPTIONS` environment variable:
```sh
docker run --rm -it -p 8080:8080 \
-e JAVA_TOOL_OPTIONS="-javaagent:/app/opentelemetry-javaagent.jar" \
-e OTEL_TRACES_EXPORTER="jaeger" \
-e OTEL_SERVICE_NAME="hapi-fhir-server" \
-e OTEL_EXPORTER_JAEGER_ENDPOINT="http://jaeger:14250" \
docker.io/hapiproject/hapi:latest
```
You can configure the agent using environment variables or Java system properties, see <https://opentelemetry.io/docs/instrumentation/java/automatic/agent-config/> for details.

@@ -1 +0,0 @@
docker build -t hapi-fhir/hapi-fhir-jpaserver-starter .

@@ -1,4 +0,0 @@
#!/bin/sh
docker build -t hapi-fhir/hapi-fhir-jpaserver-starter .

@@ -1,209 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# List of comma-separated packages that start with or equal this string
# will cause a security exception to be thrown when
# passed to checkPackageAccess unless the
# corresponding RuntimePermission ("accessClassInPackage."+package) has
# been granted.
package.access=sun.,org.apache.catalina.,org.apache.coyote.,org.apache.jasper.,org.apache.tomcat.
#
# List of comma-separated packages that start with or equal this string
# will cause a security exception to be thrown when
# passed to checkPackageDefinition unless the
# corresponding RuntimePermission ("defineClassInPackage."+package) has
# been granted.
#
# by default, no packages are restricted for definition, and none of
# the class loaders supplied with the JDK call checkPackageDefinition.
#
package.definition=sun.,java.,org.apache.catalina.,org.apache.coyote.,\
org.apache.jasper.,org.apache.naming.,org.apache.tomcat.
#
#
# List of comma-separated paths defining the contents of the "common"
# classloader. Prefixes should be used to define what is the repository type.
# Path may be relative to the CATALINA_HOME or CATALINA_BASE path or absolute.
# If left as blank,the JVM system loader will be used as Catalina's "common"
# loader.
# Examples:
# "foo": Add this folder as a class repository
# "foo/*.jar": Add all the JARs of the specified folder as class
# repositories
# "foo/bar.jar": Add bar.jar as a class repository
#
# Note: Values are enclosed in double quotes ("...") in case either the
# ${catalina.base} path or the ${catalina.home} path contains a comma.
# Because double quotes are used for quoting, the double quote character
# may not appear in a path.
common.loader="${catalina.base}/lib","${catalina.base}/lib/*.jar","${catalina.home}/lib","${catalina.home}/lib/*.jar"
#
# List of comma-separated paths defining the contents of the "server"
# classloader. Prefixes should be used to define what is the repository type.
# Path may be relative to the CATALINA_HOME or CATALINA_BASE path or absolute.
# If left as blank, the "common" loader will be used as Catalina's "server"
# loader.
# Examples:
# "foo": Add this folder as a class repository
# "foo/*.jar": Add all the JARs of the specified folder as class
# repositories
# "foo/bar.jar": Add bar.jar as a class repository
#
# Note: Values may be enclosed in double quotes ("...") in case either the
# ${catalina.base} path or the ${catalina.home} path contains a comma.
# Because double quotes are used for quoting, the double quote character
# may not appear in a path.
server.loader=
#
# List of comma-separated paths defining the contents of the "shared"
# classloader. Prefixes should be used to define what is the repository type.
# Path may be relative to the CATALINA_BASE path or absolute. If left as blank,
# the "common" loader will be used as Catalina's "shared" loader.
# Examples:
# "foo": Add this folder as a class repository
# "foo/*.jar": Add all the JARs of the specified folder as class
# repositories
# "foo/bar.jar": Add bar.jar as a class repository
# Please note that for single jars, e.g. bar.jar, you need the URL form
# starting with file:.
#
# Note: Values may be enclosed in double quotes ("...") in case either the
# ${catalina.base} path or the ${catalina.home} path contains a comma.
# Because double quotes are used for quoting, the double quote character
# may not appear in a path.
shared.loader=
# Default list of JAR files that should not be scanned using the JarScanner
# functionality. This is typically used to scan JARs for configuration
# information. JARs that do not contain such information may be excluded from
# the scan to speed up the scanning process. This is the default list. JARs on
# this list are excluded from all scans. The list must be a comma separated list
# of JAR file names.
# The list of JARs to skip may be over-ridden at a Context level for individual
# scan types by configuring a JarScanner with a nested JarScanFilter.
# The JARs listed below include:
# - Tomcat Bootstrap JARs
# - Tomcat API JARs
# - Catalina JARs
# - Jasper JARs
# - Tomcat JARs
# - Common non-Tomcat JARs
# - Test JARs (JUnit, Cobertura and dependencies)
tomcat.util.scan.StandardJarScanFilter.jarsToSkip=\
annotations-api.jar,\
ant-junit*.jar,\
ant-launcher.jar,\
ant.jar,\
asm-*.jar,\
aspectj*.jar,\
bootstrap.jar,\
catalina-ant.jar,\
catalina-ha.jar,\
catalina-ssi.jar,\
catalina-storeconfig.jar,\
catalina-tribes.jar,\
catalina.jar,\
cglib-*.jar,\
cobertura-*.jar,\
commons-beanutils*.jar,\
commons-codec*.jar,\
commons-collections*.jar,\
commons-daemon.jar,\
commons-dbcp*.jar,\
commons-digester*.jar,\
commons-fileupload*.jar,\
commons-httpclient*.jar,\
commons-io*.jar,\
commons-lang*.jar,\
commons-logging*.jar,\
commons-math*.jar,\
commons-pool*.jar,\
dom4j-*.jar,\
easymock-*.jar,\
ecj-*.jar,\
el-api.jar,\
geronimo-spec-jaxrpc*.jar,\
h2*.jar,\
hamcrest-*.jar,\
hibernate*.jar,\
httpclient*.jar,\
icu4j-*.jar,\
jasper-el.jar,\
jasper.jar,\
jaspic-api.jar,\
jaxb-*.jar,\
jaxen-*.jar,\
jdom-*.jar,\
jetty-*.jar,\
jmx-tools.jar,\
jmx.jar,\
jsp-api.jar,\
jstl.jar,\
jta*.jar,\
junit-*.jar,\
junit.jar,\
log4j*.jar,\
mail*.jar,\
objenesis-*.jar,\
oraclepki.jar,\
oro-*.jar,\
servlet-api-*.jar,\
servlet-api.jar,\
slf4j*.jar,\
taglibs-standard-spec-*.jar,\
tagsoup-*.jar,\
tomcat-api.jar,\
tomcat-coyote.jar,\
tomcat-dbcp.jar,\
tomcat-i18n-*.jar,\
tomcat-jdbc.jar,\
tomcat-jni.jar,\
tomcat-juli-adapters.jar,\
tomcat-juli.jar,\
tomcat-util-scan.jar,\
tomcat-util.jar,\
tomcat-websocket.jar,\
tools.jar,\
websocket-api.jar,\
wsdl4j*.jar,\
xercesImpl.jar,\
xml-apis.jar,\
xmlParserAPIs-*.jar,\
xmlParserAPIs.jar,\
xom-*.jar
# Default list of JAR files that should be scanned that overrides the default
# jarsToSkip list above. This is typically used to include a specific JAR that
# has been excluded by a broad file name pattern in the jarsToSkip list.
# The list of JARs to scan may be over-ridden at a Context level for individual
# scan types by configuring a JarScanner with a nested JarScanFilter.
tomcat.util.scan.StandardJarScanFilter.jarsToScan=\
log4j-taglib*.jar,\
log4j-web*.jar,\
log4javascript*.jar,\
slf4j-taglib*.jar
# String cache configuration.
tomcat.util.buf.StringCache.byte.enabled=true
#tomcat.util.buf.StringCache.char.enabled=true
#tomcat.util.buf.StringCache.trainThreshold=500000
#tomcat.util.buf.StringCache.cacheSize=5000
org.apache.tomcat.util.digester.PROPERTY_SOURCE=org.apache.tomcat.util.digester.Digester$EnvironmentPropertySource
server.port=8080

@@ -1,9 +0,0 @@
dependencies:
- name: postgresql
  repository: oci://registry-1.docker.io/bitnamicharts
  version: 16.5.5
- name: common
  repository: oci://registry-1.docker.io/bitnamicharts
  version: 2.30.0
digest: sha256:c114b296b53007a7973260de8bad61917fe741c0ad9de15fdbeea5743c8f4910
generated: "2025-03-21T21:44:22.334197366+01:00"

@@ -1,34 +0,0 @@
apiVersion: v2
name: hapi-fhir-jpaserver
description: A Helm chart for deploying the HAPI FHIR JPA server starter on Kubernetes.
type: application
home: https://github.com/hapifhir/hapi-fhir-jpaserver-starter
sources:
  - https://github.com/hapifhir/hapi-fhir-jpaserver-starter
dependencies:
  - name: postgresql
    version: 16.5.5
    repository: oci://registry-1.docker.io/bitnamicharts
    condition: postgresql.enabled
  - name: common
    repository: oci://registry-1.docker.io/bitnamicharts
    version: 2.30.0
appVersion: 8.0.0
version: 0.19.0
annotations:
  artifacthub.io/license: Apache-2.0
  artifacthub.io/containsSecurityUpdates: "false"
  artifacthub.io/operator: "false"
  artifacthub.io/prerelease: "false"
  artifacthub.io/recommendations: |
    - url: https://artifacthub.io/packages/helm/prometheus-community/kube-prometheus-stack
    - url: https://artifacthub.io/packages/helm/bitnami/postgresql
  artifacthub.io/changes: |
    # When using the list of objects option the valid supported kinds are
    # added, changed, deprecated, removed, fixed, and security.
    - kind: changed
      description: "updated postgresql sub-chart to 16.5.5"
    - kind: changed
      description: "updated curlimages/curl to 8.12.1"
    - kind: changed
      description: "updated hapiproject/hapi to v8.0.0-1"

@@ -1,150 +0,0 @@
# HAPI FHIR JPA Server Starter Helm Chart
![Version: 0.19.0](https://img.shields.io/badge/Version-0.19.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 8.0.0](https://img.shields.io/badge/AppVersion-8.0.0-informational?style=flat-square)
This helm chart will help you install the HAPI FHIR JPA Server in a Kubernetes environment.
## Sample usage
```sh
helm repo add hapifhir https://hapifhir.github.io/hapi-fhir-jpaserver-starter/
helm install hapi-fhir-jpaserver hapifhir/hapi-fhir-jpaserver
```
## Requirements
| Repository | Name | Version |
|------------|------|---------|
| oci://registry-1.docker.io/bitnamicharts | common | 2.30.0 |
| oci://registry-1.docker.io/bitnamicharts | postgresql | 16.5.5 |
## Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| affinity | object | `{}` | pod affinity |
| deploymentAnnotations | object | `{}` | annotations applied to the server deployment |
| externalDatabase.database | string | `"fhir"` | database name |
| externalDatabase.existingSecret | string | `""` | name of an existing secret resource containing the DB password in the `existingSecretKey` key |
| externalDatabase.existingSecretKey | string | `"postgresql-password"` | name of the key inside the `existingSecret` |
| externalDatabase.host | string | `"localhost"` | external database host used with `postgresql.enabled=false` |
| externalDatabase.password | string | `""` | database password |
| externalDatabase.port | int | `5432` | database port number |
| externalDatabase.user | string | `"fhir"` | username for the external database |
| extraConfig | string | `""` | additional Spring Boot application config. Mounted as a file and automatically loaded by the application. |
| extraEnv | list | `[]` | extra environment variables to set on the server container |
| extraVolumeMounts | list | `[]` | Optionally specify extra list of additional volumeMounts |
| extraVolumes | list | `[]` | Optionally specify extra list of additional volumes |
| fullnameOverride | string | `""` | override the chart fullname |
| image.pullPolicy | string | `"IfNotPresent"` | image pullPolicy to use |
| image.registry | string | `"docker.io"` | registry where the HAPI FHIR server image is hosted |
| image.repository | string | `"hapiproject/hapi"` | the path inside the repository |
| image.tag | string | `"v8.0.0-1@sha256:9fbac7b012b4be91ba481e7008f1353ede4598bc99a36f3902b8abf873e70ed8"` | the image tag. As of v5.7.0, this is the `distroless` flavor by default, add `-tomcat` to use the Tomcat-based image. |
| imagePullSecrets | list | `[]` | image pull secrets to use when pulling the image |
| ingress.annotations | object | `{}` | provide any additional annotations which may be required. Evaluated as a template. |
| ingress.enabled | bool | `false` | whether to create an Ingress to expose the FHIR server HTTP endpoint |
| ingress.hosts[0].host | string | `"fhir-server.127.0.0.1.nip.io"` | |
| ingress.hosts[0].pathType | string | `"ImplementationSpecific"` | |
| ingress.hosts[0].paths[0] | string | `"/"` | |
| ingress.tls | list | `[]` | ingress TLS config |
| initContainers.resources | object | `{}` | configure the init containers pods resource requests and limits |
| initContainers.resourcesPreset | string | `"nano"` | set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if `resources` is set (`resources` is recommended for production). More information: <https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15> |
| metrics.service.port | int | `8081` | |
| metrics.serviceMonitor.additionalLabels | object | `{}` | additional labels to apply to the ServiceMonitor object, e.g. `release: prometheus` |
| metrics.serviceMonitor.enabled | bool | `false` | if enabled, creates a ServiceMonitor instance for Prometheus Operator-based monitoring |
| nameOverride | string | `""` | override the chart name |
| nodeSelector | object | `{}` | node selector for the pod |
| podAnnotations | object | `{}` | annotations applied to the server pod |
| podDisruptionBudget.enabled | bool | `false` | Enable PodDisruptionBudget for the server pods. uses policy/v1/PodDisruptionBudget thus requiring k8s 1.21+ |
| podDisruptionBudget.maxUnavailable | string | `""` | maximum unavailable instances |
| podDisruptionBudget.minAvailable | int | `1` | minimum available instances |
| podSecurityContext | object | `{"fsGroup":65532,"fsGroupChangePolicy":"OnRootMismatch","runAsGroup":65532,"runAsNonRoot":true,"runAsUser":65532,"seccompProfile":{"type":"RuntimeDefault"}}` | pod security context |
| postgresql.auth.database | string | `"fhir"` | name for a custom database to create |
| postgresql.auth.existingSecret | string | `""` | Name of an existing secret to use for PostgreSQL credentials. `auth.postgresPassword`, `auth.password`, and `auth.replicationPassword` will be ignored and picked up from this secret. The secret must contain the keys `postgres-password` (the password for the "postgres" admin user), `password` (the password for the custom user to create when `auth.username` is set), and `replication-password` (the password for the replication user). The secret might also contain the key `ldap-password` if LDAP is enabled; `ldap.bind_password` will be ignored and picked from this secret in this case. The value is evaluated as a template. |
| postgresql.enabled | bool | `true` | enable an included PostgreSQL DB. see <https://github.com/bitnami/charts/tree/master/bitnami/postgresql> for details if set to `false`, the values under `externalDatabase` are used |
| replicaCount | int | `1` | number of replicas to deploy |
| resources | object | `{}` | configure the FHIR server's resource requests and limits |
| resourcesPreset | string | `"medium"` | set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if `resources` is set (`resources` is recommended for production). More information: <https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15> |
| securityContext.allowPrivilegeEscalation | bool | `false` | |
| securityContext.capabilities.drop[0] | string | `"ALL"` | |
| securityContext.privileged | bool | `false` | |
| securityContext.readOnlyRootFilesystem | bool | `true` | |
| securityContext.runAsGroup | int | `65532` | |
| securityContext.runAsNonRoot | bool | `true` | |
| securityContext.runAsUser | int | `65532` | |
| securityContext.seccompProfile.type | string | `"RuntimeDefault"` | |
| service.port | int | `8080` | port where the server will be exposed at |
| service.type | string | `"ClusterIP"` | service type |
| serviceAccount.annotations | object | `{}` | Annotations to add to the service account |
| serviceAccount.automount | bool | `true` | Automatically mount a ServiceAccount's API credentials? |
| serviceAccount.create | bool | `false` | Specifies whether a service account should be created. |
| serviceAccount.name | string | `""` | The name of the service account to use. If not set and create is true, a name is generated using the fullname template |
| tests.automountServiceAccountToken | bool | `false` | whether the service account token should be auto-mounted for the test pods |
| tests.resources | object | `{}` | configure the test pods resource requests and limits |
| tests.resourcesPreset | string | `"nano"` | set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if `resources` is set (`resources` is recommended for production). More information: <https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15> |
| tolerations | list | `[]` | pod tolerations |
| topologySpreadConstraints | list | `[]` | pod topology spread configuration see: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#api |
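For example, a sketch of a values override that disables the bundled PostgreSQL chart and points the server at an existing database (hostname and secret name are placeholders):
```yaml
postgresql:
  enabled: false
externalDatabase:
  host: postgres.example.internal
  port: 5432
  database: fhir
  user: fhir
  existingSecret: hapi-fhir-db
  existingSecretKey: postgresql-password
```
Install it with `helm install hapi-fhir-jpaserver hapifhir/hapi-fhir-jpaserver -f values-external-db.yaml`.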
## Development
To update the Helm chart when a new version of the `hapiproject/hapi` image is released, [values.yaml](values.yaml) `image.tag` and the [Chart.yaml](Chart.yaml)'s
`version` and optionally the `appVersion` field need to be updated. Afterwards, re-generate the [README.md](README.md)
by running:
```sh
$ helm-docs
INFO[2021-11-20T12:38:04Z] Found Chart directories [charts/hapi-fhir-jpaserver]
INFO[2021-11-20T12:38:04Z] Generating README Documentation for chart /usr/src/app/charts/hapi-fhir-jpaserver
```
## Enable Distributed Tracing based on the OpenTelemetry Java Agent
The container image includes the [OpenTelemetry Java agent JAR](https://github.com/open-telemetry/opentelemetry-java-instrumentation)
which can be used to enable distributed tracing. It can be configured entirely using environment variables,
see <https://opentelemetry.io/docs/instrumentation/java/automatic/agent-config/> for details.
Here's an example setup deploying [Jaeger](https://www.jaegertracing.io/) as a tracing backend:
```sh
# required by the Jaeger Operator
kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.9.1/cert-manager.yaml
kubectl create namespace observability
kubectl create -f https://github.com/jaegertracing/jaeger-operator/releases/download/v1.37.0/jaeger-operator.yaml -n observability
cat <<EOF | kubectl apply -n observability -f -
# simple, all-in-one Jaeger installation. Not suitable for production use.
apiVersion: jaegertracing.io/v1
kind: Jaeger
metadata:
name: simplest
EOF
```
Use this chart's `extraEnv` value to set the required environment variables:
```yaml
extraEnv:
- name: JAVA_TOOL_OPTIONS
value: "-javaagent:/app/opentelemetry-javaagent.jar"
- name: OTEL_METRICS_EXPORTER
value: "none"
- name: OTEL_LOGS_EXPORTER
value: "none"
- name: OTEL_TRACES_EXPORTER
value: "jaeger"
- name: OTEL_SERVICE_NAME
value: "hapi-fhir-jpaserver"
- name: OTEL_EXPORTER_JAEGER_ENDPOINT
value: "http://simplest-collector.observability.svc:14250"
```
Finally, you can open the Jaeger query UI by running:
```sh
kubectl port-forward -n observability service/simplest-query 16686:16686
```
and opening <http://localhost:16686/> in your browser.
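To produce a first trace, you can, for example, port-forward the FHIR service and send a request to it (the service name below assumes the chart was installed with the release name `hapi-fhir-jpaserver` as in the sample usage):
```sh
kubectl port-forward service/hapi-fhir-jpaserver 8080:8080 &
curl -s "http://localhost:8080/fhir/metadata?_summary=true" > /dev/null
# the request should now show up as a trace for service "hapi-fhir-jpaserver" in the Jaeger UI
```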
----------------------------------------------
Autogenerated from chart metadata using [helm-docs v1.14.2](https://github.com/norwoodj/helm-docs/releases/v1.14.2)

View File

@ -1,79 +0,0 @@
# HAPI FHIR JPA Server Starter Helm Chart
{{ template "chart.versionBadge" . }}{{ template "chart.typeBadge" . }}{{ template "chart.appVersionBadge" . }}
This helm chart will help you install the HAPI FHIR JPA Server in a Kubernetes environment.
## Sample usage
```sh
helm repo add hapifhir https://hapifhir.github.io/hapi-fhir-jpaserver-starter/
helm install hapi-fhir-jpaserver hapifhir/hapi-fhir-jpaserver
```
{{ template "chart.requirementsSection" . }}
{{ template "chart.valuesSection" . }}
## Development
To update the Helm chart when a new version of the `hapiproject/hapi` image is released, update `image.tag` in [values.yaml](values.yaml) and bump the
`version` (and optionally the `appVersion`) in [Chart.yaml](Chart.yaml). Afterwards, re-generate the [README.md](README.md)
by running:
```sh
$ helm-docs
INFO[2021-11-20T12:38:04Z] Found Chart directories [charts/hapi-fhir-jpaserver]
INFO[2021-11-20T12:38:04Z] Generating README Documentation for chart /usr/src/app/charts/hapi-fhir-jpaserver
```
## Enable Distributed Tracing based on the OpenTelemetry Java Agent
The container image includes the [OpenTelemetry Java agent JAR](https://github.com/open-telemetry/opentelemetry-java-instrumentation)
which can be used to enable distributed tracing. It can be configured entirely using environment variables,
see <https://opentelemetry.io/docs/instrumentation/java/automatic/agent-config/> for details.
Here's an example setup deploying [Jaeger](https://www.jaegertracing.io/) as a tracing backend:
```sh
# required by the Jaeger Operator
kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.9.1/cert-manager.yaml
kubectl create namespace observability
kubectl create -f https://github.com/jaegertracing/jaeger-operator/releases/download/v1.37.0/jaeger-operator.yaml -n observability
cat <<EOF | kubectl apply -n observability -f -
# simple, all-in-one Jaeger installation. Not suitable for production use.
apiVersion: jaegertracing.io/v1
kind: Jaeger
metadata:
name: simplest
EOF
```
Use this chart's `extraEnv` value to set the required environment variables:
```yaml
extraEnv:
- name: JAVA_TOOL_OPTIONS
value: "-javaagent:/app/opentelemetry-javaagent.jar"
- name: OTEL_METRICS_EXPORTER
value: "none"
- name: OTEL_LOGS_EXPORTER
value: "none"
- name: OTEL_TRACES_EXPORTER
value: "jaeger"
- name: OTEL_SERVICE_NAME
value: "hapi-fhir-jpaserver"
- name: OTEL_EXPORTER_JAEGER_ENDPOINT
value: "http://simplest-collector.observability.svc:14250"
```
Finally, you can open the Jaeger query UI by running:
```sh
kubectl port-forward -n observability service/simplest-query 16686:16686
```
and opening <http://localhost:16686/> in your browser.
{{ template "helm-docs.versionFooter" . }}

View File

@ -1,7 +0,0 @@
postgresql:
enabled: true
auth:
username: hapi_fhir_jpaserver_starter_user
database: hapi_fhir_jpaserver_starter
password: secret_user_password
postgresPassword: secret_postgres_password

View File

@ -1,6 +0,0 @@
ingress:
enabled: true
postgresql:
auth:
postgresPassword: secretpassword

View File

@ -1,17 +0,0 @@
extraConfig: |
hapi:
fhir:
cr_enabled: true
tester:
home:
name: Hello HAPI FHIR
server_address: "http://fhir-server.127.0.0.1.nip.io/fhir"
refuse_to_fetch_third_party_urls: true
fhir_version: R4
ingress:
enabled: true
hosts:
- host: fhir-server.127.0.0.1.nip.io
pathType: ImplementationSpecific
paths: ["/"]

View File

@ -1,11 +0,0 @@
extraVolumes:
- name: config-kube-root-ca
configMap:
name: kube-root-ca.crt
items:
- key: ca.crt
path: ca.crt
extraVolumeMounts:
- name: config-kube-root-ca
mountPath: /etc/test

View File

@ -1,22 +0,0 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
{{- range .paths }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }}
{{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "hapi-fhir-jpaserver.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch its status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "hapi-fhir-jpaserver.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "hapi-fhir-jpaserver.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "hapi-fhir-jpaserver.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
{{- end }}

View File

@ -1,152 +0,0 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "hapi-fhir-jpaserver.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "hapi-fhir-jpaserver.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "hapi-fhir-jpaserver.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "hapi-fhir-jpaserver.labels" -}}
helm.sh/chart: {{ include "hapi-fhir-jpaserver.chart" . }}
{{ include "hapi-fhir-jpaserver.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "hapi-fhir-jpaserver.selectorLabels" -}}
app.kubernetes.io/name: {{ include "hapi-fhir-jpaserver.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "hapi-fhir-jpaserver.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "hapi-fhir-jpaserver.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}
{{/*
Create a default fully qualified postgresql name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "hapi-fhir-jpaserver.postgresql.fullname" -}}
{{- $name := default "postgresql" .Values.postgresql.nameOverride -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Get the Postgresql credentials secret name.
*/}}
{{- define "hapi-fhir-jpaserver.postgresql.secretName" -}}
{{- if .Values.postgresql.enabled -}}
{{- if .Values.postgresql.auth.existingSecret -}}
{{- printf "%s" .Values.postgresql.auth.existingSecret -}}
{{- else -}}
{{- printf "%s" (include "hapi-fhir-jpaserver.postgresql.fullname" .) -}}
{{- end -}}
{{- else }}
{{- if .Values.externalDatabase.existingSecret -}}
{{- printf "%s" .Values.externalDatabase.existingSecret -}}
{{- else -}}
{{ printf "%s-%s" (include "hapi-fhir-jpaserver.fullname" .) "external-db" }}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Get the Postgresql credentials secret key.
*/}}
{{- define "hapi-fhir-jpaserver.postgresql.secretKey" -}}
{{- if .Values.postgresql.enabled -}}
{{- if .Values.postgresql.auth.username -}}
{{- printf "%s" .Values.postgresql.auth.secretKeys.userPasswordKey -}}
{{- else -}}
{{- printf "%s" .Values.postgresql.auth.secretKeys.adminPasswordKey -}}
{{- end -}}
{{- else }}
{{- if .Values.externalDatabase.existingSecret -}}
{{- printf "%s" .Values.externalDatabase.existingSecretKey -}}
{{- else -}}
{{- printf "postgres-password" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Add environment variables to configure database values
*/}}
{{- define "hapi-fhir-jpaserver.database.host" -}}
{{- ternary (include "hapi-fhir-jpaserver.postgresql.fullname" .) .Values.externalDatabase.host .Values.postgresql.enabled -}}
{{- end -}}
{{/*
Add environment variables to configure database values
*/}}
{{- define "hapi-fhir-jpaserver.database.user" -}}
{{- if .Values.postgresql.enabled -}}
{{- printf "%s" .Values.postgresql.auth.username | default "postgres" -}}
{{- else -}}
{{- printf "%s" .Values.externalDatabase.user -}}
{{- end -}}
{{- end -}}
{{/*
Add environment variables to configure database values
*/}}
{{- define "hapi-fhir-jpaserver.database.name" -}}
{{- ternary .Values.postgresql.auth.database .Values.externalDatabase.database .Values.postgresql.enabled -}}
{{- end -}}
{{/*
Add environment variables to configure database values
*/}}
{{- define "hapi-fhir-jpaserver.database.port" -}}
{{- ternary "5432" .Values.externalDatabase.port .Values.postgresql.enabled -}}
{{- end -}}
{{/*
Create the JDBC URL from the host, port and database name.
*/}}
{{- define "hapi-fhir-jpaserver.database.jdbcUrl" -}}
{{- $host := (include "hapi-fhir-jpaserver.database.host" .) -}}
{{- $port := (include "hapi-fhir-jpaserver.database.port" .) -}}
{{- $name := (include "hapi-fhir-jpaserver.database.name" .) -}}
{{- $appName := .Release.Name -}}
{{ printf "jdbc:postgresql://%s:%d/%s?ApplicationName=%s" $host (int $port) $name $appName }}
{{- end -}}
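{{/*
Illustrative example only: with the bundled PostgreSQL enabled, a release named "hapi-fhir-jpaserver"
and the default database name "fhir", the helper above renders roughly:
jdbc:postgresql://hapi-fhir-jpaserver-postgresql:5432/fhir?ApplicationName=hapi-fhir-jpaserver
*/}}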

View File

@ -1,11 +0,0 @@
{{- if .Values.extraConfig -}}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "hapi-fhir-jpaserver.fullname" . }}-application-config
labels:
{{- include "hapi-fhir-jpaserver.labels" . | nindent 4 }}
data:
application-extra.yaml: |-
{{ .Values.extraConfig | nindent 4 }}
{{- end }}

View File

@ -1,160 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "hapi-fhir-jpaserver.fullname" . }}
labels:
{{- include "hapi-fhir-jpaserver.labels" . | nindent 4 }}
{{- with .Values.deploymentAnnotations }}
annotations:
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
{{- include "hapi-fhir-jpaserver.selectorLabels" . | nindent 6 }}
template:
metadata:
{{- with .Values.podAnnotations }}
annotations:
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
{{- include "hapi-fhir-jpaserver.selectorLabels" . | nindent 8 }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "hapi-fhir-jpaserver.serviceAccountName" . }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
initContainers:
- name: wait-for-db-to-be-ready
image: docker.io/bitnami/postgresql:17.4.0-debian-12-r10@sha256:7b9af9dd759055265998bbf12368e6d7d6326e6fd23f8157be841fad0915c1a1
imagePullPolicy: IfNotPresent
{{- with .Values.restrictedContainerSecurityContext }}
securityContext:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- if .Values.initContainers.resources }}
resources: {{- toYaml .Values.initContainers.resources | nindent 12 }}
{{- else if ne .Values.initContainers.resourcesPreset "none" }}
resources: {{- include "common.resources.preset" (dict "type" .Values.initContainers.resourcesPreset) | nindent 12 }}
{{- end }}
env:
- name: PGHOST
value: "{{ include "hapi-fhir-jpaserver.database.host" . }}"
- name: PGPORT
value: "{{ include "hapi-fhir-jpaserver.database.port" . }}"
- name: PGUSER
value: "{{ include "hapi-fhir-jpaserver.database.user" . }}"
command: ["/bin/sh", "-c"]
args:
- |
until pg_isready; do
echo "Waiting for DB ${PGUSER}@${PGHOST}:${PGPORT} to be up";
sleep 15;
done;
containers:
- name: {{ .Chart.Name }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
image: {{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- name: http
containerPort: 8080
protocol: TCP
- name: http-metrics
containerPort: 8081
protocol: TCP
{{- with .Values.startupProbe }}
startupProbe:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.livenessProbe }}
livenessProbe:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.readinessProbe }}
readinessProbe:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- if .Values.resources }}
resources: {{- toYaml .Values.resources | nindent 12 }}
{{- else if ne .Values.resourcesPreset "none" }}
resources: {{- include "common.resources.preset" (dict "type" .Values.resourcesPreset) | nindent 12 }}
{{- end }}
env:
- name: SPRING_DATASOURCE_URL
value: {{ include "hapi-fhir-jpaserver.database.jdbcUrl" $ }}
- name: SPRING_DATASOURCE_USERNAME
value: {{ include "hapi-fhir-jpaserver.database.user" $ }}
- name: SPRING_DATASOURCE_PASSWORD
valueFrom:
secretKeyRef:
name: {{ include "hapi-fhir-jpaserver.postgresql.secretName" . }}
key: {{ include "hapi-fhir-jpaserver.postgresql.secretKey" . }}
- name: SPRING_DATASOURCE_DRIVERCLASSNAME
value: org.postgresql.Driver
- name: spring.jpa.properties.hibernate.dialect
value: ca.uhn.fhir.jpa.model.dialect.HapiFhirPostgres94Dialect
- name: HAPI_FHIR_USE_APACHE_ADDRESS_STRATEGY
value: "true"
- name: MANAGEMENT_ENDPOINT_HEALTH_PROBES_ADD_ADDITIONAL_PATHS
value: "true"
- name: MANAGEMENT_SERVER_PORT
value: "8081"
- name: MANAGEMENT_ENDPOINTS_WEB_EXPOSURE_INCLUDE
value: "health,prometheus"
{{- if .Values.extraConfig }}
- name: SPRING_CONFIG_IMPORT
value: "/app/config/application-extra.yaml"
{{- end }}
{{- if .Values.extraEnv }}
{{ toYaml .Values.extraEnv | nindent 12 }}
{{- end }}
volumeMounts:
- mountPath: /tmp
name: tmp-volume
- mountPath: /app/target
name: lucenefiles-volume
{{- if .Values.extraConfig }}
- name: application-extra-config
mountPath: /app/config/application-extra.yaml
readOnly: true
subPath: application-extra.yaml
{{- end }}
{{- if .Values.extraVolumeMounts }}
{{- include "common.tplvalues.render" (dict "value" .Values.extraVolumeMounts "context" $) | nindent 12 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.topologySpreadConstraints }}
topologySpreadConstraints:
{{- toYaml . | nindent 8 }}
{{- end }}
volumes:
- name: tmp-volume
emptyDir: {}
- name: lucenefiles-volume
emptyDir: {}
{{- if .Values.extraConfig }}
- name: application-extra-config
configMap:
name: {{ include "hapi-fhir-jpaserver.fullname" . }}-application-config
{{- end }}
{{- if .Values.extraVolumes }}
{{- include "common.tplvalues.render" (dict "value" .Values.extraVolumes "context" $) | nindent 8 }}
{{- end }}

View File

@ -1,11 +0,0 @@
{{- if and (not .Values.postgresql.enabled) (not .Values.externalDatabase.existingSecret) (not .Values.postgresql.auth.existingSecret) }}
apiVersion: v1
kind: Secret
metadata:
name: {{ include "hapi-fhir-jpaserver.fullname" . }}-external-db
labels:
{{- include "hapi-fhir-jpaserver.labels" . | nindent 4 }}
type: Opaque
data:
postgres-password: {{ .Values.externalDatabase.password | b64enc | quote }}
{{- end }}

View File

@ -1,53 +0,0 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "hapi-fhir-jpaserver.fullname" . -}}
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion }}
apiVersion: networking.k8s.io/v1
{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion }}
apiVersion: networking.k8s.io/v1beta1
{{ else }}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
name: {{ $fullName }}
labels:
{{- include "hapi-fhir-jpaserver.labels" . | nindent 4 }}
{{- with .Values.ingress.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}
{{- $pathType := .pathType }}
- host: {{ .host | quote }}
http:
paths:
{{- range .paths }}
- path: {{ . }}
{{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }}
pathType: {{ $pathType | default "ImplementationSpecific" }}
{{- end }}
backend:
{{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }}
service:
name: {{ $fullName }}
port:
name: http
{{ else }}
serviceName: {{ $fullName }}
servicePort: http
{{- end }}
{{- end }}
{{- end }}
{{- end }}

View File

@ -1,18 +0,0 @@
{{- if .Values.podDisruptionBudget.enabled }}
kind: PodDisruptionBudget
apiVersion: policy/v1
metadata:
name: {{ include "hapi-fhir-jpaserver.fullname" . }}
labels:
{{- include "hapi-fhir-jpaserver.labels" . | nindent 4 }}
spec:
{{- if .Values.podDisruptionBudget.minAvailable }}
minAvailable: {{ .Values.podDisruptionBudget.minAvailable }}
{{- end }}
{{- if .Values.podDisruptionBudget.maxUnavailable }}
maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }}
{{- end }}
selector:
matchLabels:
{{- include "hapi-fhir-jpaserver.selectorLabels" . | nindent 6 }}
{{- end }}

View File

@ -1,19 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "hapi-fhir-jpaserver.fullname" . }}
labels:
{{- include "hapi-fhir-jpaserver.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.port }}
targetPort: http
protocol: TCP
name: http
- port: {{ .Values.metrics.service.port }}
targetPort: http-metrics
protocol: TCP
name: http-metrics
selector:
{{- include "hapi-fhir-jpaserver.selectorLabels" . | nindent 4 }}

View File

@ -1,13 +0,0 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "hapi-fhir-jpaserver.serviceAccountName" . }}
labels:
{{- include "hapi-fhir-jpaserver.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
automountServiceAccountToken: {{ .Values.serviceAccount.automount }}
{{- end }}

View File

@ -1,30 +0,0 @@
{{- if .Values.metrics.serviceMonitor.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ include "hapi-fhir-jpaserver.fullname" . }}
{{- if .Values.metrics.serviceMonitor.namespace }}
namespace: {{ .Values.metrics.serviceMonitor.namespace }}
{{- end }}
labels:
{{- include "hapi-fhir-jpaserver.labels" . | nindent 4 }}
{{- if .Values.metrics.serviceMonitor.additionalLabels }}
{{- toYaml .Values.metrics.serviceMonitor.additionalLabels | nindent 4 }}
{{- end }}
spec:
endpoints:
- port: http-metrics
path: /actuator/prometheus
{{- if .Values.metrics.serviceMonitor.interval }}
interval: {{ .Values.metrics.serviceMonitor.interval }}
{{- end }}
{{- if .Values.metrics.serviceMonitor.scrapeTimeout }}
scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }}
{{- end }}
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
selector:
matchLabels:
{{- include "hapi-fhir-jpaserver.selectorLabels" . | nindent 6 }}
{{- end }}

View File

@ -1,73 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: "{{ include "hapi-fhir-jpaserver.fullname" . }}-test-endpoints"
labels:
{{- include "hapi-fhir-jpaserver.labels" . | nindent 4 }}
{{ include "hapi-fhir-jpaserver.fullname" . }}-client: "true"
app.kubernetes.io/component: tests
annotations:
"helm.sh/hook": test
spec:
restartPolicy: Never
automountServiceAccountToken: {{ .Values.tests.automountServiceAccountToken }}
securityContext:
{{- toYaml .Values.tests.podSecurityContext | nindent 4 }}
containers:
- name: test-metadata-endpoint
image: "{{ .Values.curl.image.registry }}/{{ .Values.curl.image.repository }}:{{ .Values.curl.image.tag }}"
command: ["curl", "--fail-with-body"]
args: ["http://{{ include "hapi-fhir-jpaserver.fullname" . }}:{{ .Values.service.port }}/fhir/metadata?_summary=true"]
{{- with .Values.restrictedContainerSecurityContext }}
securityContext:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if .Values.tests.resources }}
resources: {{- toYaml .Values.tests.resources | nindent 10 }}
{{- else if ne .Values.tests.resourcesPreset "none" }}
resources: {{- include "common.resources.preset" (dict "type" .Values.tests.resourcesPreset) | nindent 10 }}
{{- end }}
livenessProbe:
exec:
command: ["true"]
readinessProbe:
exec:
command: ["true"]
- name: test-patient-endpoint
image: "{{ .Values.curl.image.registry }}/{{ .Values.curl.image.repository }}:{{ .Values.curl.image.tag }}"
command: ["curl", "--fail-with-body"]
args: ["http://{{ include "hapi-fhir-jpaserver.fullname" . }}:{{ .Values.service.port }}/fhir/Patient?_count=1&_summary=true"]
{{- with .Values.restrictedContainerSecurityContext }}
securityContext:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if .Values.tests.resources }}
resources: {{- toYaml .Values.tests.resources | nindent 10 }}
{{- else if ne .Values.tests.resourcesPreset "none" }}
resources: {{- include "common.resources.preset" (dict "type" .Values.tests.resourcesPreset) | nindent 10 }}
{{- end }}
livenessProbe:
exec:
command: ["true"]
readinessProbe:
exec:
command: ["true"]
- name: test-metrics-endpoint
image: "{{ .Values.curl.image.registry }}/{{ .Values.curl.image.repository }}:{{ .Values.curl.image.tag }}"
command: ["curl", "--fail-with-body"]
args: ["http://{{ include "hapi-fhir-jpaserver.fullname" . }}:{{ .Values.metrics.service.port }}/actuator/prometheus"]
{{- with .Values.restrictedContainerSecurityContext }}
securityContext:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if .Values.tests.resources }}
resources: {{- toYaml .Values.tests.resources | nindent 10 }}
{{- else if ne .Values.tests.resourcesPreset "none" }}
resources: {{- include "common.resources.preset" (dict "type" .Values.tests.resourcesPreset) | nindent 10 }}
{{- end }}
livenessProbe:
exec:
command: ["true"]
readinessProbe:
exec:
command: ["true"]

View File

@ -1,302 +0,0 @@
# -- number of replicas to deploy
replicaCount: 1
image:
# -- registry where the HAPI FHIR server image is hosted
registry: docker.io
# -- the path inside the repository
repository: hapiproject/hapi
# -- the image tag. As of v5.7.0, this is the `distroless` flavor by default; add `-tomcat` to use the Tomcat-based image.
tag: "v8.0.0-1@sha256:9fbac7b012b4be91ba481e7008f1353ede4598bc99a36f3902b8abf873e70ed8"
# -- image pullPolicy to use
pullPolicy: IfNotPresent
# -- image pull secrets to use when pulling the image
imagePullSecrets: []
# -- override the chart name
nameOverride: ""
# -- override the chart fullname
fullnameOverride: ""
# -- annotations applied to the server deployment
deploymentAnnotations: {}
# -- annotations applied to the server pod
podAnnotations: {}
# -- pod security context
podSecurityContext:
fsGroupChangePolicy: OnRootMismatch
runAsNonRoot: true
runAsGroup: 65532
runAsUser: 65532
fsGroup: 65532
seccompProfile:
type: RuntimeDefault
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 65532
runAsGroup: 65532
privileged: false
seccompProfile:
type: RuntimeDefault
# service to expose the server
service:
# -- service type
type: ClusterIP
# -- port on which the server is exposed
port: 8080
ingress:
# -- whether to create an Ingress to expose the FHIR server HTTP endpoint
enabled: false
# -- provide any additional annotations which may be required. Evaluated as a template.
annotations:
{}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: fhir-server.127.0.0.1.nip.io
pathType: ImplementationSpecific
paths: ["/"]
# -- ingress TLS config
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
# -- set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge).
# This is ignored if `resources` is set (`resources` is recommended for production).
# More information: <https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15>
resourcesPreset: "medium"
# -- configure the FHIR server's resource requests and limits
resources:
{}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases the chances that charts run in environments with limited
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
# -- node selector for the pod
nodeSelector: {}
# -- pod tolerations
tolerations: []
# -- pod affinity
affinity: {}
# -- pod topology spread configuration
# see: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#api
topologySpreadConstraints:
[]
# - maxSkew: 1
# topologyKey: topology.kubernetes.io/zone
# whenUnsatisfiable: ScheduleAnyway
# labelSelector:
# matchLabels:
# app.kubernetes.io/instance: hapi-fhir-jpaserver
# app.kubernetes.io/name: hapi-fhir-jpaserver
postgresql:
# -- enable an included PostgreSQL DB.
# see <https://github.com/bitnami/charts/tree/master/bitnami/postgresql> for details
# if set to `false`, the values under `externalDatabase` are used
enabled: true
auth:
# -- name for a custom database to create
database: "fhir"
# -- Name of existing secret to use for PostgreSQL credentials
# `auth.postgresPassword`, `auth.password`, and `auth.replicationPassword` will be ignored and picked up from this secret
# The secret must contain the keys `postgres-password` (which is the password for "postgres" admin user),
# `password` (which is the password for the custom user to create when `auth.username` is set),
# and `replication-password` (which is the password for replication user).
# The secret might also contain the key `ldap-password` if LDAP is enabled. `ldap.bind_password` will be ignored and
# picked from this secret in this case.
# The value is evaluated as a template.
existingSecret: ""
# -- readiness probe
# @ignored
readinessProbe:
httpGet:
path: /readyz
port: http
failureThreshold: 5
initialDelaySeconds: 30
periodSeconds: 20
successThreshold: 1
timeoutSeconds: 20
# -- liveness probe
# @ignored
livenessProbe:
httpGet:
path: /livez
port: http
failureThreshold: 5
initialDelaySeconds: 30
periodSeconds: 20
successThreshold: 1
timeoutSeconds: 30
# -- startup probe
# @ignored
startupProbe:
httpGet:
path: /readyz
port: http
failureThreshold: 10
initialDelaySeconds: 30
periodSeconds: 30
successThreshold: 1
timeoutSeconds: 30
externalDatabase:
# -- external database host used with `postgresql.enabled=false`
host: localhost
# -- database port number
port: 5432
# -- username for the external database
user: fhir
# -- database password
password: ""
# -- name of an existing secret resource containing the DB password in the `existingSecretKey` key
existingSecret: ""
# -- name of the key inside the `existingSecret`
existingSecretKey: "postgresql-password"
# -- database name
database: fhir
# -- extra environment variables to set on the server container
extraEnv:
[]
# - name: SPRING_FLYWAY_BASELINE_ON_MIGRATE
# value: "true"
podDisruptionBudget:
# -- Enable PodDisruptionBudget for the server pods.
# uses policy/v1/PodDisruptionBudget thus requiring k8s 1.21+
enabled: false
# -- minimum available instances
minAvailable: 1
# -- maximum unavailable instances
maxUnavailable: ""
serviceAccount:
# -- Specifies whether a service account should be created.
create: false
# -- Annotations to add to the service account
annotations: {}
# -- The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
# -- Automatically mount a ServiceAccount's API credentials?
automount: true
metrics:
serviceMonitor:
# -- if enabled, creates a ServiceMonitor instance for Prometheus Operator-based monitoring
enabled: false
# -- additional labels to apply to the ServiceMonitor object, e.g. `release: prometheus`
additionalLabels: {}
# namespace: monitoring
# interval: 30s
# scrapeTimeout: 10s
service:
port: 8081
# @ignore
restrictedContainerSecurityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
privileged: false
capabilities:
drop:
- ALL
runAsNonRoot: true
runAsUser: 65534
runAsGroup: 65534
seccompProfile:
type: RuntimeDefault
# @ignored
curl:
image:
registry: docker.io
repository: curlimages/curl
tag: 8.12.1@sha256:94e9e444bcba979c2ea12e27ae39bee4cd10bc7041a472c4727a558e213744e6
tests:
# -- whether the service account token should be auto-mounted for the test pods
automountServiceAccountToken: false
# -- set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge).
# This is ignored if `resources` is set (`resources` is recommended for production).
# More information: <https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15>
resourcesPreset: "nano"
# -- configure the test pods resource requests and limits
resources: {}
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
# @ignored
podSecurityContext:
fsGroupChangePolicy: OnRootMismatch
runAsNonRoot: true
runAsGroup: 65532
runAsUser: 65532
fsGroup: 65532
seccompProfile:
type: RuntimeDefault
initContainers:
# -- set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge).
# This is ignored if `resources` is set (`resources` is recommended for production).
# More information: <https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15>
resourcesPreset: "nano"
# -- configure the init containers' resource requests and limits
resources: {}
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
# -- additional Spring Boot application config. Mounted as a file and automatically loaded by the application.
extraConfig:
""
# # For example:
# |
# hapi:
# fhir:
# implementationguides:
# gh_0_1_0:
# url: https://build.fhir.org/ig/hl7-eu/gravitate-health/package.tgz
# name: hl7.eu.fhir.gh
# version: 0.1.0
# -- Optionally specify extra list of additional volumes
extraVolumes: []
# -- Optionally specify extra list of additional volumeMounts
extraVolumeMounts: []

View File

@ -1,3 +0,0 @@
<p>
Greetings from the custom web app page!
</p>

View File

@ -1,14 +0,0 @@
<p>
<b>This is a custom about page! It means you have configured 'custom_content_path: ./custom' in the application.yaml</b>
</p>
<p>
This server provides a complete implementation of the FHIR Specification
using a 100% open source software stack.
</p>
<p>
This server is built
from a number of modules of the
<a href="https://github.com/hapifhir/hapi-fhir/">HAPI FHIR</a>
project, which is a 100% open-source (Apache 2.0 Licensed) Java based
implementation of the FHIR specification.
</p>

Binary file not shown (deleted image, 47 KiB)

View File

@ -1,14 +0,0 @@
<p>
<b>This is a custom welcome page! It means you have configured 'custom_content_path: ./custom' in the application.yaml</b>
</p>
<p>
This server provides a complete implementation of the FHIR Specification
using a 100% open source software stack.
</p>
<p>
This server is built
from a number of modules of the
<a href="https://github.com/hapifhir/hapi-fhir/">HAPI FHIR</a>
project, which is a 100% open-source (Apache 2.0 Licensed) Java based
implementation of the FHIR specification.
</p>

View File

@ -1 +0,0 @@
docker build --tag hapiproject/hapi:latest --tag hapiproject/hapi:4.1 -m 4g .

View File

@ -1,25 +0,0 @@
version: "3"
services:
hapi-fhir-jpaserver-start:
build: .
container_name: hapi-fhir-jpaserver-start
restart: on-failure
environment:
SPRING_DATASOURCE_URL: "jdbc:postgresql://hapi-fhir-postgres:5432/hapi"
SPRING_DATASOURCE_USERNAME: "admin"
SPRING_DATASOURCE_PASSWORD: "admin"
SPRING_DATASOURCE_DRIVERCLASSNAME: "org.postgresql.Driver"
ports:
- "8080:8080"
hapi-fhir-postgres:
image: postgres:15-alpine
container_name: hapi-fhir-postgres
restart: always
environment:
POSTGRES_DB: "hapi"
POSTGRES_USER: "admin"
POSTGRES_PASSWORD: "admin"
volumes:
- hapi-fhir-postgres:/var/lib/postgresql/data
volumes:
hapi-fhir-postgres:
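# Usage (illustrative): build the image and start both services with
#   docker compose up --build
# the FHIR endpoint is then reachable at http://localhost:8080/fhir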

View File

@ -1,717 +0,0 @@
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<properties>
<java.version>17</java.version>
<hapi.fhir.jpa.server.starter.revision>1</hapi.fhir.jpa.server.starter.revision>
<clinical-reasoning.version>3.19.0</clinical-reasoning.version>
</properties>
<!-- one-liner to take you to the cloud with settings from the application.yaml file: -->
<!-- 'mvn clean package com.google.cloud.tools:jib-maven-plugin:dockerBuild -Dimage=distroless-hapi && docker run -p 8080:8080 distroless-hapi' -->
<!--
Note: HAPI projects use the "hapi-fhir" POM as their base to provide easy management.
You do not need to use this in your own projects, so the "parent" tag and its
contents below may be removed
if you are using this file as a basis for your own project.
-->
<parent>
<groupId>ca.uhn.hapi.fhir</groupId>
<artifactId>hapi-fhir</artifactId>
<version>8.0.0</version>
</parent>
<artifactId>hapi-fhir-jpaserver-starter</artifactId>
<version>${project.parent.version}-${hapi.fhir.jpa.server.starter.revision}</version>
<packaging>war</packaging>
<prerequisites>
<maven>3.8.3</maven>
</prerequisites>
<name>HAPI FHIR JPA Server - Starter Project</name>
<repositories>
<repository>
<id>oss-snapshots</id>
<snapshots>
<enabled>true</enabled>
</snapshots>
<url>https://oss.sonatype.org/content/repositories/snapshots/</url>
</repository>
</repositories>
<dependencyManagement>
<dependencies>
<dependency>
<groupId>org.glassfish.jaxb</groupId>
<artifactId>jaxb-runtime</artifactId>
<version>2.3.8</version>
</dependency>
</dependencies>
</dependencyManagement>
<dependencies>
<dependency>
<groupId>org.postgresql</groupId>
<artifactId>postgresql</artifactId>
</dependency>
<dependency>
<groupId>com.microsoft.sqlserver</groupId>
<artifactId>mssql-jdbc</artifactId>
</dependency>
<!-- Needed for Email subscriptions -->
<dependency>
<groupId>org.simplejavamail</groupId>
<artifactId>simple-java-mail</artifactId>
<exclusions>
<exclusion>
<groupId>jakarta.annotation</groupId>
<artifactId>jakarta.annotation-api</artifactId>
</exclusion>
</exclusions>
</dependency>
<!-- This dependency includes the core HAPI-FHIR classes -->
<dependency>
<groupId>ca.uhn.hapi.fhir</groupId>
<artifactId>hapi-fhir-base</artifactId>
<version>${project.parent.version}</version>
</dependency>
<!-- This dependency includes the EmailSenderImpl we will be using instead of standard javamail.-->
<dependency>
<groupId>ca.uhn.hapi.fhir</groupId>
<artifactId>hapi-fhir-jpaserver-subscription</artifactId>
<version>${project.parent.version}</version>
<exclusions>
<exclusion>
<groupId>com.zaxxer</groupId>
<artifactId>HikariCP-java7</artifactId>
</exclusion>
</exclusions>
</dependency>
<!-- This dependency includes the JPA server itself, which is packaged separately from the rest of HAPI FHIR -->
<dependency>
<groupId>ca.uhn.hapi.fhir</groupId>
<artifactId>hapi-fhir-jpaserver-base</artifactId>
<version>${project.parent.version}</version>
<exclusions>
<exclusion>
<groupId>org.springframework</groupId>
<artifactId>spring-jcl</artifactId>
</exclusion>
<exclusion>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
</exclusion>
</exclusions>
</dependency>
<!-- This dependency includes the JPA CQL Server -->
<dependency>
<groupId>org.opencds.cqf.fhir</groupId>
<artifactId>cqf-fhir-cr-hapi</artifactId>
<version>${clinical-reasoning.version}</version>
</dependency>
<!-- This dependency includes the JPA MDM Server -->
<dependency>
<groupId>ca.uhn.hapi.fhir</groupId>
<artifactId>hapi-fhir-jpaserver-mdm</artifactId>
<version>${project.parent.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-context</artifactId>
</dependency>
<!-- This dependency includes the CDS Hooks Server -->
<dependency>
<groupId>ca.uhn.hapi.fhir</groupId>
<artifactId>hapi-fhir-server-cds-hooks</artifactId>
<version>${project.parent.version}</version>
</dependency>
<!-- This dependency includes the OpenAPI Server -->
<dependency>
<groupId>ca.uhn.hapi.fhir</groupId>
<artifactId>hapi-fhir-server-openapi</artifactId>
<version>${project.parent.version}</version>
<exclusions>
<exclusion>
<groupId>org.yaml</groupId>
<artifactId>snakeyaml</artifactId>
</exclusion>
</exclusions>
</dependency>
<!-- This dependency is used for the "FHIR Tester" web app overlay -->
<dependency>
<groupId>ca.uhn.hapi.fhir</groupId>
<artifactId>hapi-fhir-testpage-overlay</artifactId>
<version>${project.parent.version}</version>
<type>war</type>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>ca.uhn.hapi.fhir</groupId>
<artifactId>hapi-fhir-testpage-overlay</artifactId>
<version>${project.parent.version}</version>
<classifier>classes</classifier>
</dependency>
<!-- This dependency is used to include the IPS Base Implementation -->
<dependency>
<groupId>ca.uhn.hapi.fhir</groupId>
<artifactId>hapi-fhir-jpaserver-ips</artifactId>
<version>${project.parent.version}</version>
</dependency>
<!-- HAPI-FHIR uses Logback for logging support. The logback library is included automatically by Maven as a part of the hapi-fhir-base dependency, but you also need to include a logging library. Logback
is used here, but log4j would also be fine. -->
<dependency>
<groupId>ch.qos.logback</groupId>
<artifactId>logback-classic</artifactId>
</dependency>
<dependency>
<groupId>ch.qos.logback</groupId>
<artifactId>logback-core</artifactId>
</dependency>
<!-- Needed for JEE/Servlet support -->
<dependency>
<groupId>jakarta.servlet</groupId>
<artifactId>jakarta.servlet-api</artifactId>
<scope>provided</scope>
</dependency>
<!-- If you are using HAPI narrative generation, you will need to include Thymeleaf as well. Otherwise the following can be omitted. -->
<dependency>
<groupId>org.thymeleaf</groupId>
<artifactId>thymeleaf</artifactId>
</dependency>
<!-- Used for CORS support -->
<!-- Spring Web is used to deploy the server to a web container. -->
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-web</artifactId>
</dependency>
<!-- commons-logging is provided by jcl-over-slf4j, and the jar often shows up because of transitive dependencies; this dependency should avoid it -->
<dependency>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
<version>1.2</version>
<scope>provided</scope>
</dependency>
<!-- You may not need this if you are deploying to an application server which provides database connection pools itself. -->
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-dbcp2</artifactId>
<exclusions>
<exclusion>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
</exclusion>
</exclusions>
</dependency>
<!-- This example uses H2 embedded database. If you are using another database such as Mysql or Oracle, you may omit the following dependencies and replace them with an appropriate database client
dependency for your database platform. -->
<dependency>
<groupId>com.h2database</groupId>
<artifactId>h2</artifactId>
</dependency>
<!-- webjars -->
<dependency>
<groupId>org.webjars</groupId>
<artifactId>Eonasdan-bootstrap-datetimepicker</artifactId>
</dependency>
<dependency>
<groupId>org.webjars</groupId>
<artifactId>font-awesome</artifactId>
</dependency>
<dependency>
<groupId>org.webjars.bower</groupId>
<artifactId>awesome-bootstrap-checkbox</artifactId>
</dependency>
<dependency>
<groupId>org.webjars</groupId>
<artifactId>jstimezonedetect</artifactId>
</dependency>
<dependency>
<groupId>org.webjars</groupId>
<artifactId>select2</artifactId>
</dependency>
<dependency>
<groupId>org.webjars.bower</groupId>
<artifactId>jquery</artifactId>
</dependency>
<dependency>
<groupId>org.webjars.bower</groupId>
<artifactId>moment</artifactId>
</dependency>
<!-- The following dependencies are only needed for automated unit tests; you do not necessarily need them to run the example. -->
<dependency>
<groupId>co.elastic.clients</groupId>
<artifactId>elasticsearch-java</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>ca.uhn.hapi.fhir</groupId>
<artifactId>hapi-fhir-jpaserver-test-utilities</artifactId>
<version>${project.parent.version}</version>
<scope>test</scope>
<exclusions>
<exclusion>
<groupId>com.sun.xml.bind</groupId>
<artifactId>jaxb-impl</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.testcontainers</groupId>
<artifactId>testcontainers</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.testcontainers</groupId>
<artifactId>elasticsearch</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.testcontainers</groupId>
<artifactId>junit-jupiter</artifactId>
<scope>test</scope>
</dependency>
<!--
For some reason JavaDoc crashed during site generation unless we have this dependency
-->
<dependency>
<groupId>jakarta.interceptor</groupId>
<artifactId>jakarta.interceptor-api</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>ca.uhn.hapi.fhir</groupId>
<artifactId>hapi-fhir-test-utilities</artifactId>
<version>${project.parent.version}</version>
<scope>test</scope>
<exclusions>
<exclusion>
<groupId>org.eclipse.jetty.ee10</groupId>
<artifactId>*</artifactId>
</exclusion>
<exclusion>
<groupId>org.eclipse.jetty.ee10.websocket</groupId>
<artifactId>*</artifactId>
</exclusion>
<exclusion>
<groupId>org.eclipse.jetty</groupId>
<artifactId>*</artifactId>
</exclusion>
<exclusion>
<groupId>org.eclipse.jetty.websocket</groupId>
<artifactId>*</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.awaitility</groupId>
<artifactId>awaitility</artifactId>
<version>4.2.0</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-autoconfigure</artifactId>
<version>${spring_boot_version}</version>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-actuator</artifactId>
<version>${spring_boot_version}</version>
</dependency>
<!-- https://mvnrepository.com/artifact/io.micrometer/micrometer-core -->
<dependency>
<groupId>io.micrometer</groupId>
<artifactId>micrometer-core</artifactId>
<version>1.13.3</version>
</dependency>
<!-- https://mvnrepository.com/artifact/io.micrometer/micrometer-registry-prometheus -->
<dependency>
<groupId>io.micrometer</groupId>
<artifactId>micrometer-registry-prometheus</artifactId>
<version>1.13.3</version>
</dependency>
<!-- https://mvnrepository.com/artifact/io.micrometer/micrometer-registry-prometheus-simpleclient -->
<dependency>
<groupId>io.micrometer</groupId>
<artifactId>micrometer-registry-prometheus-simpleclient</artifactId>
<version>1.13.3</version>
</dependency>
<dependency>
<groupId>com.zaxxer</groupId>
<artifactId>HikariCP</artifactId>
<version>5.0.1</version>
</dependency>
<dependency>
<groupId>org.junit.jupiter</groupId>
<artifactId>junit-jupiter-api</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.junit.jupiter</groupId>
<artifactId>junit-jupiter-engine</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-test</artifactId>
<scope>test</scope>
</dependency>
</dependencies>
<build>
<pluginManagement>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-dependency-plugin</artifactId>
<version>3.6.0</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<version>3.4.0</version>
</plugin>
</plugins>
</pluginManagement>
<!-- Tells Maven to name the generated WAR file as ROOT.war -->
<finalName>ROOT</finalName>
<plugins>
<plugin>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-maven-plugin</artifactId>
<configuration>
<!--
java -jar ROOT.war doesn't work; there is a bug in spring-boot-3.2.0 due to hibernate search,
probably solved in 3.2.1 https://github.com/spring-projects/spring-boot/issues/38585
(at the moment only tomcat works)
-->
<loaderImplementation>CLASSIC</loaderImplementation>
</configuration>
<executions>
<execution>
<goals>
<goal>repackage</goal>
</goals>
<configuration>
<mainClass>ca.uhn.fhir.jpa.starter.Application</mainClass>
</configuration>
</execution>
</executions>
</plugin>
<!-- Tell Maven which Java source version you want to use -->
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<version>3.13.0</version>
<configuration>
<release>${java.version}</release>
</configuration>
</plugin>
<!-- The configuration here tells the WAR plugin to include the FHIR Tester overlay. You can omit it if you are not using that feature. -->
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-war-plugin</artifactId>
<configuration>
<archive>
<manifestEntries>
<Build-Time>${maven.build.timestamp}</Build-Time>
</manifestEntries>
</archive>
<attachClasses>true</attachClasses>
<overlays>
<overlay>
<groupId>ca.uhn.hapi.fhir</groupId>
<artifactId>hapi-fhir-testpage-overlay</artifactId>
</overlay>
</overlays>
<failOnMissingWebXml>false</failOnMissingWebXml>
</configuration>
</plugin>
<!-- This is to run the integration tests -->
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-failsafe-plugin</artifactId>
<version>3.4.0</version>
<configuration>
<redirectTestOutputToFile>true</redirectTestOutputToFile>
</configuration>
<executions>
<execution>
<goals>
<goal>integration-test</goal>
<goal>verify</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.basepom.maven</groupId>
<artifactId>duplicate-finder-maven-plugin</artifactId>
<executions>
<execution>
<id>default</id>
<phase>verify</phase>
<goals>
<goal>check</goal>
</goals>
<inherited>true</inherited>
</execution>
</executions>
<configuration>
<failBuildInCaseOfConflict>false</failBuildInCaseOfConflict>
<checkTestClasspath>false</checkTestClasspath>
<!--
<printEqualFiles>false</printEqualFiles>
<failBuildInCaseOfDifferentContentConflict>true</failBuildInCaseOfDifferentContentConflict>
<failBuildInCaseOfEqualContentConflict>true</failBuildInCaseOfEqualContentConflict>
<failBuildInCaseOfConflict>true</failBuildInCaseOfConflict>
<checkCompileClasspath>true</checkCompileClasspath>
<checkRuntimeClasspath>false</checkRuntimeClasspath>
<skip>false</skip>
<quiet>false</quiet>
<preferLocal>true</preferLocal>
<useResultFile>true</useResultFile>
<resultFileMinClasspathCount>2</resultFileMinClasspathCount>
<resultFile>${project.build.directory}/duplicate-finder-result.xml</resultFile>
-->
<!--
<ignoredDependencies>
<dependency>
<groupId>javax.el</groupId>
<artifactId>javax.el-api</artifactId>
</dependency>
<dependency>
<groupId>javax.mail</groupId>
<artifactId>javax.mail-api</artifactId>
</dependency>
<dependency>
<groupId>javax.activation</groupId>
<artifactId>javax.activation-api</artifactId>
</dependency>
<dependency>
<groupId>com.helger</groupId>
<artifactId>ph-schematron</artifactId>
</dependency>
<dependency>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
</dependency>
<dependency>
<groupId>org.jscience</groupId>
<artifactId>jscience</artifactId>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-jcl</artifactId>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-jcl</artifactId>
</dependency>
<dependency>
<groupId>com.google.code.findbugs</groupId>
<artifactId>annotations</artifactId>
</dependency>
<dependency>
<groupId>org.apache.derby</groupId>
<artifactId>derbyclient</artifactId>
</dependency>
<dependency>
<groupId>org.apache.derby</groupId>
<artifactId>derbynet</artifactId>
</dependency>
<dependency>
<groupId>org.apache.derby</groupId>
<artifactId>derbyclient</artifactId>
</dependency>
<dependency>
<groupId>org.checkerframework</groupId>
<artifactId>checker-compat-qual</artifactId>
</dependency>
</ignoredDependencies>
-->
<ignoredResourcePatterns>
<ignoredResourcePattern>.*\.txt$</ignoredResourcePattern>
<ignoredResourcePattern>.*\.html$</ignoredResourcePattern>
<ignoredResourcePattern>config/favicon.ico</ignoredResourcePattern>
</ignoredResourcePatterns>
</configuration>
</plugin>
</plugins>
</build>
<profiles>
<!-- Package the war for your preference. Use the boot profile if you prefer a single jar/war
that can be started with an embedded application server. Default is jetty as it is assumed
that the main users of this project already have an app server.
Different profiles are needed as packaging it for Spring Boot makes the resulting war undeployable
due to a class shading issue between tomcat and jetty.
(the error is 'java.util.ServiceConfigurationError: org.apache.juli.logging.Log: org.eclipse.jetty.apache.jsp.JuliLog not a subtype')
-->
<!-- example of how to start the server using spring boot-->
<!-- mvn clean package spring-boot:repackage -Pboot && java -jar target/ROOT.war -->
<!-- Use the boot profile for development and debugging options when using your IDE -->
<profile>
<id>boot</id>
<activation>
<activeByDefault>true</activeByDefault>
</activation>
<dependencies>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-web</artifactId>
<version>${spring_boot_version}</version>
</dependency>
</dependencies>
</profile>
<!-- -->
<!-- Examples of how to run the server: -->
<!-- -->
<!-- For Spring Boot use: -->
<!-- mvn spring-boot:run -->
<!-- -->
<!-- For JETTY use: -->
<!-- mvn -Pjetty spring-boot:run -->
<!-- -->
<!-- For the Tomcat WAR approach use: -->
<!-- mvn clean package -DskipTests && java -jar ./target/ROOT.war -->
<!-- -->
<!-- NEXT: Browse to http://localhost:8080/fhir -->
<!-- -->
<profile>
<id>jetty</id>
<dependencies>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-web</artifactId>
<version>${spring_boot_version}</version>
<exclusions>
<exclusion>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-tomcat</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-jetty</artifactId>
<version>${spring_boot_version}</version>
</dependency>
</dependencies>
</profile>
<profile>
<id>ossrh-repo</id>
<activation>
<activeByDefault>false</activeByDefault>
<property>
<name>deployToSonatype</name>
</property>
</activation>
<distributionManagement>
<snapshotRepository>
<id>ossrh</id>
<url>https://oss.sonatype.org/content/repositories/snapshots</url>
</snapshotRepository>
<repository>
<id>ossrh</id>
<url>https://oss.sonatype.org/service/local/staging/deploy/maven2/</url>
</repository>
</distributionManagement>
<build>
<plugins>
<plugin>
<groupId>org.sonatype.plugins</groupId>
<artifactId>nexus-staging-maven-plugin</artifactId>
<version>1.6.13</version>
<extensions>true</extensions>
<configuration>
<serverId>ossrh</serverId>
<nexusUrl>https://oss.sonatype.org/</nexusUrl>
<autoReleaseAfterClose>true</autoReleaseAfterClose>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-gpg-plugin</artifactId>
<version>1.6</version>
<executions>
<execution>
<id>sign-artifacts</id>
<phase>verify</phase>
<goals>
<goal>sign</goal>
</goals>
<configuration>
<keyname>${gpg.keyname}</keyname>
<passphraseServerId>${gpg.keyname}</passphraseServerId>
<gpgArguments>
<arg>--pinentry-mode</arg>
<arg>loopback</arg>
</gpgArguments>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
</profile>
<!-- For connecting to GCP CloudSQL Postgres instances:
https://github.com/GoogleCloudPlatform/cloud-sql-jdbc-socket-factory/blob/main/docs/jdbc.md#postgres-1
Needs 'boot' profile as well. -->
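<!-- Illustrative only: with the socket factory on the classpath, the datasource URL typically takes the form
(placeholders, not real values):
jdbc:postgresql:///<DATABASE_NAME>?cloudSqlInstance=<PROJECT:REGION:INSTANCE>&socketFactory=com.google.cloud.sql.postgres.SocketFactory
-->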
<profile>
<id>cloudsql-postgres</id>
<dependencies>
<dependency>
<groupId>com.google.cloud.sql</groupId>
<artifactId>postgres-socket-factory</artifactId>
<version>1.17.0</version>
</dependency>
</dependencies>
</profile>
</profiles>
</project>

View File

@ -1,76 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Note: A "Server" is not itself a "Container", so you may not
define subcomponents such as "Valves" at this level.
Documentation at /docs/config/server.html
-->
<Server port="8005" shutdown="SHUTDOWN">
<Listener className="org.apache.catalina.startup.VersionLoggerListener" />
<!--APR library loader. Documentation at /docs/apr.html -->
<Listener className="org.apache.catalina.core.AprLifecycleListener" SSLEngine="on" />
<!-- Prevent memory leaks due to use of particular java/javax APIs-->
<Listener className="org.apache.catalina.core.JreMemoryLeakPreventionListener" />
<Listener className="org.apache.catalina.mbeans.GlobalResourcesLifecycleListener" />
<Listener className="org.apache.catalina.core.ThreadLocalLeakPreventionListener" />
<!-- Global JNDI resources
Documentation at /docs/jndi-resources-howto.html
-->
<GlobalNamingResources>
<!-- Editable user database that can also be used by
UserDatabaseRealm to authenticate users
-->
<Resource name="UserDatabase" auth="Container"
type="org.apache.catalina.UserDatabase"
description="User database that can be updated and saved"
factory="org.apache.catalina.users.MemoryUserDatabaseFactory"
pathname="conf/tomcat-users.xml" />
</GlobalNamingResources>
<!-- A "Service" is a collection of one or more "Connectors" that share
a single "Container" Note: A "Service" is not itself a "Container",
so you may not define subcomponents such as "Valves" at this level.
Documentation at /docs/config/service.html
-->
<Service name="Catalina">
<!-- A "Connector" represents an endpoint by which requests are received
and responses are returned. Documentation at :
Java HTTP Connector: /docs/config/http.html
Java AJP Connector: /docs/config/ajp.html
APR (HTTP/AJP) Connector: /docs/apr.html
Define a non-SSL/TLS HTTP/1.1 Connector on port 8080
-->
<Connector port="${server.port}" protocol="HTTP/1.1"
connectionTimeout="20000"
redirectPort="8443"
maxThreads="${server.tomcat.max-threads}"
minSpareThreads="${server.tomcat.min-spare-threads}" />
<!-- You should set jvmRoute to support load-balancing via AJP ie :
<Engine name="Catalina" defaultHost="localhost" jvmRoute="jvm1">
-->
<Engine name="Catalina" defaultHost="localhost">
<!-- Use the LockOutRealm to prevent attempts to guess user passwords
via a brute-force attack -->
<Realm className="org.apache.catalina.realm.LockOutRealm">
<!-- This Realm uses the UserDatabase configured in the global JNDI
resources under the key "UserDatabase". Any edits
that are performed against this UserDatabase are immediately
available for use by the Realm. -->
<Realm className="org.apache.catalina.realm.UserDatabaseRealm"
resourceName="UserDatabase"/>
</Realm>
<Host name="localhost" appBase="webapps"
unpackWARs="true" autoDeploy="true">
<!-- Access log processes all requests in this example.
Documentation at: /docs/config/valve.html
Note: The pattern used is equivalent to using pattern="common" -->
<Valve className="org.apache.catalina.valves.AccessLogValve" directory="logs"
prefix="localhost_access_log" suffix=".txt"
pattern="%h %l %u %t &quot;%r&quot; %s %b" />
</Host>
</Engine>
</Service>
</Server>