404
Page not found
There’s nothing here.
diff --git a/.well-known/security.txt b/.well-known/security.txt new file mode 100644 index 000000000..71e46f3ec --- /dev/null +++ b/.well-known/security.txt @@ -0,0 +1,5 @@ +Contact: mailto:dsf-security@hs-heilbronn.de +Expires: 2040-12-31T22:59:00.000Z +Preferred-Languages: de,en +Canonical: https://dsf.dev/.well-known/security.txt +Policy: https://dsf.dev/security \ No newline at end of file diff --git a/404.html b/404.html new file mode 100644 index 000000000..5521a8390 --- /dev/null +++ b/404.html @@ -0,0 +1,41 @@ + + +
+ + + + + + +404 Not Found
\\n"}');export{m as comp,d as data}; diff --git a/assets/GMDS2022-dev.html-7MTMzEUe.js b/assets/GMDS2022-dev.html-7MTMzEUe.js new file mode 100644 index 000000000..fdace34ba --- /dev/null +++ b/assets/GMDS2022-dev.html-7MTMzEUe.js @@ -0,0 +1 @@ +import{_ as n}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as l,b as o,d as e,e as a,f as s,r as i,o as d}from"./app-BIWb5uIp.js";const m={};function p(u,t){const r=i("RouteLink");return d(),l("div",null,[o("p",null,[t[1]||(t[1]=e("Redirect to ")),a(r,{to:"/oldstable/tutorial/"},{default:s(()=>t[0]||(t[0]=[e("this webpage")])),_:1}),t[2]||(t[2]=e("."))]),t[3]||(t[3]=o("meta",{"http-equiv":"refresh",content:"0; URL=/oldstable/tutorial/"},null,-1))])}const v=n(m,[["render",p],["__file","GMDS2022-dev.html.vue"]]),D=JSON.parse('{"path":"/intro/tutorials/GMDS2022-dev.html","title":"GMDS2022 - Process Development","lang":"en-US","frontmatter":{"title":"GMDS2022 - Process Development","icon":"code","gitInclude":[]},"headers":[],"readingTime":{"minutes":0.07,"words":20},"filePathRelative":"intro/tutorials/GMDS2022-dev.md","excerpt":"Redirect to this webpage.
\\n\\n"}');export{v as comp,D as data}; diff --git a/assets/KaTeX_AMS-Regular-BQhdFMY1.woff2 b/assets/KaTeX_AMS-Regular-BQhdFMY1.woff2 new file mode 100644 index 000000000..0acaaff03 Binary files /dev/null and b/assets/KaTeX_AMS-Regular-BQhdFMY1.woff2 differ diff --git a/assets/KaTeX_AMS-Regular-DMm9YOAa.woff b/assets/KaTeX_AMS-Regular-DMm9YOAa.woff new file mode 100644 index 000000000..b804d7b33 Binary files /dev/null and b/assets/KaTeX_AMS-Regular-DMm9YOAa.woff differ diff --git a/assets/KaTeX_AMS-Regular-DRggAlZN.ttf b/assets/KaTeX_AMS-Regular-DRggAlZN.ttf new file mode 100644 index 000000000..c6f9a5e7c Binary files /dev/null and b/assets/KaTeX_AMS-Regular-DRggAlZN.ttf differ diff --git a/assets/KaTeX_Caligraphic-Bold-ATXxdsX0.ttf b/assets/KaTeX_Caligraphic-Bold-ATXxdsX0.ttf new file mode 100644 index 000000000..9ff4a5e04 Binary files /dev/null and b/assets/KaTeX_Caligraphic-Bold-ATXxdsX0.ttf differ diff --git a/assets/KaTeX_Caligraphic-Bold-BEiXGLvX.woff b/assets/KaTeX_Caligraphic-Bold-BEiXGLvX.woff new file mode 100644 index 000000000..9759710d1 Binary files /dev/null and b/assets/KaTeX_Caligraphic-Bold-BEiXGLvX.woff differ diff --git a/assets/KaTeX_Caligraphic-Bold-Dq_IR9rO.woff2 b/assets/KaTeX_Caligraphic-Bold-Dq_IR9rO.woff2 new file mode 100644 index 000000000..f390922ec Binary files /dev/null and b/assets/KaTeX_Caligraphic-Bold-Dq_IR9rO.woff2 differ diff --git a/assets/KaTeX_Caligraphic-Regular-CTRA-rTL.woff b/assets/KaTeX_Caligraphic-Regular-CTRA-rTL.woff new file mode 100644 index 000000000..9bdd534fd Binary files /dev/null and b/assets/KaTeX_Caligraphic-Regular-CTRA-rTL.woff differ diff --git a/assets/KaTeX_Caligraphic-Regular-Di6jR-x-.woff2 b/assets/KaTeX_Caligraphic-Regular-Di6jR-x-.woff2 new file mode 100644 index 000000000..75344a1f9 Binary files /dev/null and b/assets/KaTeX_Caligraphic-Regular-Di6jR-x-.woff2 differ diff --git a/assets/KaTeX_Caligraphic-Regular-wX97UBjC.ttf b/assets/KaTeX_Caligraphic-Regular-wX97UBjC.ttf new file mode 100644 index 
000000000..f522294ff Binary files /dev/null and b/assets/KaTeX_Caligraphic-Regular-wX97UBjC.ttf differ diff --git a/assets/KaTeX_Fraktur-Bold-BdnERNNW.ttf b/assets/KaTeX_Fraktur-Bold-BdnERNNW.ttf new file mode 100644 index 000000000..4e98259c3 Binary files /dev/null and b/assets/KaTeX_Fraktur-Bold-BdnERNNW.ttf differ diff --git a/assets/KaTeX_Fraktur-Bold-BsDP51OF.woff b/assets/KaTeX_Fraktur-Bold-BsDP51OF.woff new file mode 100644 index 000000000..e7730f662 Binary files /dev/null and b/assets/KaTeX_Fraktur-Bold-BsDP51OF.woff differ diff --git a/assets/KaTeX_Fraktur-Bold-CL6g_b3V.woff2 b/assets/KaTeX_Fraktur-Bold-CL6g_b3V.woff2 new file mode 100644 index 000000000..395f28bea Binary files /dev/null and b/assets/KaTeX_Fraktur-Bold-CL6g_b3V.woff2 differ diff --git a/assets/KaTeX_Fraktur-Regular-CB_wures.ttf b/assets/KaTeX_Fraktur-Regular-CB_wures.ttf new file mode 100644 index 000000000..b8461b275 Binary files /dev/null and b/assets/KaTeX_Fraktur-Regular-CB_wures.ttf differ diff --git a/assets/KaTeX_Fraktur-Regular-CTYiF6lA.woff2 b/assets/KaTeX_Fraktur-Regular-CTYiF6lA.woff2 new file mode 100644 index 000000000..735f6948d Binary files /dev/null and b/assets/KaTeX_Fraktur-Regular-CTYiF6lA.woff2 differ diff --git a/assets/KaTeX_Fraktur-Regular-Dxdc4cR9.woff b/assets/KaTeX_Fraktur-Regular-Dxdc4cR9.woff new file mode 100644 index 000000000..acab069f9 Binary files /dev/null and b/assets/KaTeX_Fraktur-Regular-Dxdc4cR9.woff differ diff --git a/assets/KaTeX_Main-Bold-Cx986IdX.woff2 b/assets/KaTeX_Main-Bold-Cx986IdX.woff2 new file mode 100644 index 000000000..ab2ad21da Binary files /dev/null and b/assets/KaTeX_Main-Bold-Cx986IdX.woff2 differ diff --git a/assets/KaTeX_Main-Bold-Jm3AIy58.woff b/assets/KaTeX_Main-Bold-Jm3AIy58.woff new file mode 100644 index 000000000..f38136ac1 Binary files /dev/null and b/assets/KaTeX_Main-Bold-Jm3AIy58.woff differ diff --git a/assets/KaTeX_Main-Bold-waoOVXN0.ttf b/assets/KaTeX_Main-Bold-waoOVXN0.ttf new file mode 100644 index 
000000000..4060e627d Binary files /dev/null and b/assets/KaTeX_Main-Bold-waoOVXN0.ttf differ diff --git a/assets/KaTeX_Main-BoldItalic-DxDJ3AOS.woff2 b/assets/KaTeX_Main-BoldItalic-DxDJ3AOS.woff2 new file mode 100644 index 000000000..5931794de Binary files /dev/null and b/assets/KaTeX_Main-BoldItalic-DxDJ3AOS.woff2 differ diff --git a/assets/KaTeX_Main-BoldItalic-DzxPMmG6.ttf b/assets/KaTeX_Main-BoldItalic-DzxPMmG6.ttf new file mode 100644 index 000000000..dc007977e Binary files /dev/null and b/assets/KaTeX_Main-BoldItalic-DzxPMmG6.ttf differ diff --git a/assets/KaTeX_Main-BoldItalic-SpSLRI95.woff b/assets/KaTeX_Main-BoldItalic-SpSLRI95.woff new file mode 100644 index 000000000..67807b0bd Binary files /dev/null and b/assets/KaTeX_Main-BoldItalic-SpSLRI95.woff differ diff --git a/assets/KaTeX_Main-Italic-3WenGoN9.ttf b/assets/KaTeX_Main-Italic-3WenGoN9.ttf new file mode 100644 index 000000000..0e9b0f354 Binary files /dev/null and b/assets/KaTeX_Main-Italic-3WenGoN9.ttf differ diff --git a/assets/KaTeX_Main-Italic-BMLOBm91.woff b/assets/KaTeX_Main-Italic-BMLOBm91.woff new file mode 100644 index 000000000..6f43b594b Binary files /dev/null and b/assets/KaTeX_Main-Italic-BMLOBm91.woff differ diff --git a/assets/KaTeX_Main-Italic-NWA7e6Wa.woff2 b/assets/KaTeX_Main-Italic-NWA7e6Wa.woff2 new file mode 100644 index 000000000..b50920e13 Binary files /dev/null and b/assets/KaTeX_Main-Italic-NWA7e6Wa.woff2 differ diff --git a/assets/KaTeX_Main-Regular-B22Nviop.woff2 b/assets/KaTeX_Main-Regular-B22Nviop.woff2 new file mode 100644 index 000000000..eb24a7ba2 Binary files /dev/null and b/assets/KaTeX_Main-Regular-B22Nviop.woff2 differ diff --git a/assets/KaTeX_Main-Regular-Dr94JaBh.woff b/assets/KaTeX_Main-Regular-Dr94JaBh.woff new file mode 100644 index 000000000..21f581296 Binary files /dev/null and b/assets/KaTeX_Main-Regular-Dr94JaBh.woff differ diff --git a/assets/KaTeX_Main-Regular-ypZvNtVU.ttf b/assets/KaTeX_Main-Regular-ypZvNtVU.ttf new file mode 100644 index 
000000000..dd45e1ed2 Binary files /dev/null and b/assets/KaTeX_Main-Regular-ypZvNtVU.ttf differ diff --git a/assets/KaTeX_Math-BoldItalic-B3XSjfu4.ttf b/assets/KaTeX_Math-BoldItalic-B3XSjfu4.ttf new file mode 100644 index 000000000..728ce7a1e Binary files /dev/null and b/assets/KaTeX_Math-BoldItalic-B3XSjfu4.ttf differ diff --git a/assets/KaTeX_Math-BoldItalic-CZnvNsCZ.woff2 b/assets/KaTeX_Math-BoldItalic-CZnvNsCZ.woff2 new file mode 100644 index 000000000..29657023a Binary files /dev/null and b/assets/KaTeX_Math-BoldItalic-CZnvNsCZ.woff2 differ diff --git a/assets/KaTeX_Math-BoldItalic-iY-2wyZ7.woff b/assets/KaTeX_Math-BoldItalic-iY-2wyZ7.woff new file mode 100644 index 000000000..0ae390d74 Binary files /dev/null and b/assets/KaTeX_Math-BoldItalic-iY-2wyZ7.woff differ diff --git a/assets/KaTeX_Math-Italic-DA0__PXp.woff b/assets/KaTeX_Math-Italic-DA0__PXp.woff new file mode 100644 index 000000000..eb5159d4c Binary files /dev/null and b/assets/KaTeX_Math-Italic-DA0__PXp.woff differ diff --git a/assets/KaTeX_Math-Italic-flOr_0UB.ttf b/assets/KaTeX_Math-Italic-flOr_0UB.ttf new file mode 100644 index 000000000..70d559b4e Binary files /dev/null and b/assets/KaTeX_Math-Italic-flOr_0UB.ttf differ diff --git a/assets/KaTeX_Math-Italic-t53AETM-.woff2 b/assets/KaTeX_Math-Italic-t53AETM-.woff2 new file mode 100644 index 000000000..215c143fd Binary files /dev/null and b/assets/KaTeX_Math-Italic-t53AETM-.woff2 differ diff --git a/assets/KaTeX_SansSerif-Bold-CFMepnvq.ttf b/assets/KaTeX_SansSerif-Bold-CFMepnvq.ttf new file mode 100644 index 000000000..2f65a8a3a Binary files /dev/null and b/assets/KaTeX_SansSerif-Bold-CFMepnvq.ttf differ diff --git a/assets/KaTeX_SansSerif-Bold-D1sUS0GD.woff2 b/assets/KaTeX_SansSerif-Bold-D1sUS0GD.woff2 new file mode 100644 index 000000000..cfaa3bda5 Binary files /dev/null and b/assets/KaTeX_SansSerif-Bold-D1sUS0GD.woff2 differ diff --git a/assets/KaTeX_SansSerif-Bold-DbIhKOiC.woff b/assets/KaTeX_SansSerif-Bold-DbIhKOiC.woff new file mode 100644 
index 000000000..8d47c02d9 Binary files /dev/null and b/assets/KaTeX_SansSerif-Bold-DbIhKOiC.woff differ diff --git a/assets/KaTeX_SansSerif-Italic-C3H0VqGB.woff2 b/assets/KaTeX_SansSerif-Italic-C3H0VqGB.woff2 new file mode 100644 index 000000000..349c06dc6 Binary files /dev/null and b/assets/KaTeX_SansSerif-Italic-C3H0VqGB.woff2 differ diff --git a/assets/KaTeX_SansSerif-Italic-DN2j7dab.woff b/assets/KaTeX_SansSerif-Italic-DN2j7dab.woff new file mode 100644 index 000000000..7e02df963 Binary files /dev/null and b/assets/KaTeX_SansSerif-Italic-DN2j7dab.woff differ diff --git a/assets/KaTeX_SansSerif-Italic-YYjJ1zSn.ttf b/assets/KaTeX_SansSerif-Italic-YYjJ1zSn.ttf new file mode 100644 index 000000000..d5850df98 Binary files /dev/null and b/assets/KaTeX_SansSerif-Italic-YYjJ1zSn.ttf differ diff --git a/assets/KaTeX_SansSerif-Regular-BNo7hRIc.ttf b/assets/KaTeX_SansSerif-Regular-BNo7hRIc.ttf new file mode 100644 index 000000000..537279f6b Binary files /dev/null and b/assets/KaTeX_SansSerif-Regular-BNo7hRIc.ttf differ diff --git a/assets/KaTeX_SansSerif-Regular-CS6fqUqJ.woff b/assets/KaTeX_SansSerif-Regular-CS6fqUqJ.woff new file mode 100644 index 000000000..31b84829b Binary files /dev/null and b/assets/KaTeX_SansSerif-Regular-CS6fqUqJ.woff differ diff --git a/assets/KaTeX_SansSerif-Regular-DDBCnlJ7.woff2 b/assets/KaTeX_SansSerif-Regular-DDBCnlJ7.woff2 new file mode 100644 index 000000000..a90eea85f Binary files /dev/null and b/assets/KaTeX_SansSerif-Regular-DDBCnlJ7.woff2 differ diff --git a/assets/KaTeX_Script-Regular-C5JkGWo-.ttf b/assets/KaTeX_Script-Regular-C5JkGWo-.ttf new file mode 100644 index 000000000..fd679bf37 Binary files /dev/null and b/assets/KaTeX_Script-Regular-C5JkGWo-.ttf differ diff --git a/assets/KaTeX_Script-Regular-D3wIWfF6.woff2 b/assets/KaTeX_Script-Regular-D3wIWfF6.woff2 new file mode 100644 index 000000000..b3048fc11 Binary files /dev/null and b/assets/KaTeX_Script-Regular-D3wIWfF6.woff2 differ diff --git 
a/assets/KaTeX_Script-Regular-D5yQViql.woff b/assets/KaTeX_Script-Regular-D5yQViql.woff new file mode 100644 index 000000000..0e7da821e Binary files /dev/null and b/assets/KaTeX_Script-Regular-D5yQViql.woff differ diff --git a/assets/KaTeX_Size1-Regular-C195tn64.woff b/assets/KaTeX_Size1-Regular-C195tn64.woff new file mode 100644 index 000000000..7f292d911 Binary files /dev/null and b/assets/KaTeX_Size1-Regular-C195tn64.woff differ diff --git a/assets/KaTeX_Size1-Regular-Dbsnue_I.ttf b/assets/KaTeX_Size1-Regular-Dbsnue_I.ttf new file mode 100644 index 000000000..871fd7d19 Binary files /dev/null and b/assets/KaTeX_Size1-Regular-Dbsnue_I.ttf differ diff --git a/assets/KaTeX_Size1-Regular-mCD8mA8B.woff2 b/assets/KaTeX_Size1-Regular-mCD8mA8B.woff2 new file mode 100644 index 000000000..c5a8462fb Binary files /dev/null and b/assets/KaTeX_Size1-Regular-mCD8mA8B.woff2 differ diff --git a/assets/KaTeX_Size2-Regular-B7gKUWhC.ttf b/assets/KaTeX_Size2-Regular-B7gKUWhC.ttf new file mode 100644 index 000000000..7a212caf9 Binary files /dev/null and b/assets/KaTeX_Size2-Regular-B7gKUWhC.ttf differ diff --git a/assets/KaTeX_Size2-Regular-Dy4dx90m.woff2 b/assets/KaTeX_Size2-Regular-Dy4dx90m.woff2 new file mode 100644 index 000000000..e1bccfe24 Binary files /dev/null and b/assets/KaTeX_Size2-Regular-Dy4dx90m.woff2 differ diff --git a/assets/KaTeX_Size2-Regular-oD1tc_U0.woff b/assets/KaTeX_Size2-Regular-oD1tc_U0.woff new file mode 100644 index 000000000..d241d9be2 Binary files /dev/null and b/assets/KaTeX_Size2-Regular-oD1tc_U0.woff differ diff --git a/assets/KaTeX_Size3-Regular-CTq5MqoE.woff b/assets/KaTeX_Size3-Regular-CTq5MqoE.woff new file mode 100644 index 000000000..e6e9b658d Binary files /dev/null and b/assets/KaTeX_Size3-Regular-CTq5MqoE.woff differ diff --git a/assets/KaTeX_Size3-Regular-DgpXs0kz.ttf b/assets/KaTeX_Size3-Regular-DgpXs0kz.ttf new file mode 100644 index 000000000..00bff3495 Binary files /dev/null and b/assets/KaTeX_Size3-Regular-DgpXs0kz.ttf differ diff --git 
a/assets/KaTeX_Size4-Regular-BF-4gkZK.woff b/assets/KaTeX_Size4-Regular-BF-4gkZK.woff new file mode 100644 index 000000000..e1ec54576 Binary files /dev/null and b/assets/KaTeX_Size4-Regular-BF-4gkZK.woff differ diff --git a/assets/KaTeX_Size4-Regular-DWFBv043.ttf b/assets/KaTeX_Size4-Regular-DWFBv043.ttf new file mode 100644 index 000000000..74f08921f Binary files /dev/null and b/assets/KaTeX_Size4-Regular-DWFBv043.ttf differ diff --git a/assets/KaTeX_Size4-Regular-Dl5lxZxV.woff2 b/assets/KaTeX_Size4-Regular-Dl5lxZxV.woff2 new file mode 100644 index 000000000..680c13085 Binary files /dev/null and b/assets/KaTeX_Size4-Regular-Dl5lxZxV.woff2 differ diff --git a/assets/KaTeX_Typewriter-Regular-C0xS9mPB.woff b/assets/KaTeX_Typewriter-Regular-C0xS9mPB.woff new file mode 100644 index 000000000..2432419f2 Binary files /dev/null and b/assets/KaTeX_Typewriter-Regular-C0xS9mPB.woff differ diff --git a/assets/KaTeX_Typewriter-Regular-CO6r4hn1.woff2 b/assets/KaTeX_Typewriter-Regular-CO6r4hn1.woff2 new file mode 100644 index 000000000..771f1af70 Binary files /dev/null and b/assets/KaTeX_Typewriter-Regular-CO6r4hn1.woff2 differ diff --git a/assets/KaTeX_Typewriter-Regular-D3Ib7_Hf.ttf b/assets/KaTeX_Typewriter-Regular-D3Ib7_Hf.ttf new file mode 100644 index 000000000..c83252c57 Binary files /dev/null and b/assets/KaTeX_Typewriter-Regular-D3Ib7_Hf.ttf differ diff --git a/assets/MIE2023.html-DGWUgkfh.js b/assets/MIE2023.html-DGWUgkfh.js new file mode 100644 index 000000000..a952a83b9 --- /dev/null +++ b/assets/MIE2023.html-DGWUgkfh.js @@ -0,0 +1 @@ +import{_ as t}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as a,a as i,o as r}from"./app-BIWb5uIp.js";const s="/photos/tutorials/dsf-architecture.svg",n={};function o(l,e){return r(),a("div",null,e[0]||(e[0]=[i('With this tutorial, participants will gain a detailed insight into the Data Sharing Framework (DSF) (https://github.com/highmed/highmed-dsf). 
The open source DSF enables users to execute biomedical research and healthcare delivery processes across organizations, and the tutorial will demonstrate this with examples from the German Medical Informatics Initiative (MII) (https://www.medizininformatik-initiative.de/en/start) funded by the Federal Ministry of Education and Research (BMBF). The tutorial will cover fundamental concepts of distributed processes, the DSFs architecture and key standards such as HL7 FHIR and BPMN 2.0. Participants will have the opportunity to gain hands-on experience with the DSF by working with different processes in a lab setting. Technical aspects such as authentication and authorization will be covered, as well as guidance on using the DSF for other use cases. This tutorial is designed for those involved in distributed research projects, including project members and software developers, as well as individuals interested in multi-organizational research projects.
The use of real-world data collected by healthcare providers for bio-medical research requires the data to be available and accessible. This is especially important when considering the usage of data across organizations. Distributed processes such as feasibility studies, record linkage, and data sharing need to be established for effective consolidation and analysis.
While even multi-center retrospective studies using real-world data become commonplace, maintaining infrastructure components for every project conducted is not sustainable. To address this, the Data Sharing Framework (DSF) was developed as an open source, use case independent distributed business process engine to execute cross-organizational research and healthcare delivery processes.
The tutorial will cover the fundamental concepts of distributed processes and will provide an overview of the DSF's architecture and key standards such as HL7 FHIR and BPMN 2.0 that are used in the framework. The tutorial will present real-world examples to illustrate the DSFs capabilities and delve into specific issues the DSF addresses for the German Medical Informatics Initiative (MII).
Participants will have the opportunity to gain hands-on experience with the DSF by working with different processes in a lab setting. Technical aspects such as authentication and authorization, onboarding of new organizations and guidance on using the DSF for other use cases, such as specific research projects or new infrastructure developments, will be covered.
Finally, the tutorial will present lessons learned from the last years of using the DSF in test and production environments in multiple research consortia and will present ideas for future improvements based on feedback from end-users, process developers, project managers and administrative staff.
Decision makers and project members with data sharing ideas looking for an architecture, as well as medical informatics professionals and software developers tasked with building multi-centric data sharing applications, are the target audience for this tutorial.
You need a laptop with a web browser for the hands-on portion of the tutorial.
You will...
With this tutorial, participants will gain a detailed insight into the Data Sharing Framework (DSF) (https://github.com/highmed/highmed-dsf). The open source DSF enables users to execute biomedical research and healthcare delivery processes across organizations, and the tutorial will demonstrate this with examples from the German Medical Informatics Initiative (MII) (https://www.medizininformatik-initiative.de/en/start) funded by the Federal Ministry of Education and Research (BMBF). The tutorial will cover fundamental concepts of distributed processes, the DSFs architecture and key standards such as HL7 FHIR and BPMN 2.0. Participants will have the opportunity to gain hands-on experience with the DSF by working with different processes in a lab setting. Technical aspects such as authentication and authorization will be covered, as well as guidance on using the DSF for other use cases. This tutorial is designed for those involved in distributed research projects, including project members and software developers, as well as individuals interested in multi-organizational research projects.
"}');export{c as comp,p as data}; diff --git a/assets/SearchResult-DDOJWxca.js b/assets/SearchResult-DDOJWxca.js new file mode 100644 index 000000000..5947319bd --- /dev/null +++ b/assets/SearchResult-DDOJWxca.js @@ -0,0 +1 @@ +import{u as G,g as et,h as at,i as _,j as it,P as nt,t as lt,k as rt,l as E,m as k,n as ot,w as N,p as a,q as mt,R as M,v as st,x as vt,y as ht,C as ut,z as ct,A as dt,B as pt,D as ft,E as gt,F as bt,G as yt,H as $,I as j,J as xt,K as R,L as wt}from"./app-BIWb5uIp.js";const Ht=["/","/about/","/for-you/","/for-you/learn.html","/hackathon/","/intro/","/intro/publications.html","/oldstable/","/oldstable/introduction.html","/security/","/sprechstunde/","/stable/dsf-for-dev.html","/stable/","/stable/process-plugins-advanced.html","/v1.0.0/dsf-for-dev.html","/v1.0.0/","/v1.0.0/process-plugins-advanced.html","/v1.1.0/dsf-for-dev.html","/v1.1.0/","/v1.1.0/process-plugins-advanced.html","/v1.2.0/dsf-for-dev.html","/v1.2.0/","/v1.2.0/process-plugins-advanced.html","/v1.3.0/dsf-for-dev.html","/v1.3.0/","/v1.3.0/process-plugins-advanced.html","/v1.3.1/dsf-for-dev.html","/v1.3.1/","/v1.3.1/process-plugins-advanced.html","/v1.3.2/dsf-for-dev.html","/v1.3.2/","/v1.3.2/process-plugins-advanced.html","/v1.4.0/dsf-for-dev.html","/v1.4.0/","/v1.4.0/process-plugins-advanced.html","/v1.5.0/dsf-for-dev.html","/v1.5.0/","/v1.5.0/process-plugins-advanced.html","/v1.5.1/dsf-for-dev.html","/v1.5.1/","/v1.5.1/process-plugins-advanced.html","/v1.5.2/dsf-for-dev.html","/v1.5.2/","/v1.5.2/process-plugins-advanced.html","/v1.6.0/dsf-for-dev.html","/v1.6.0/","/v1.6.0/process-plugins-advanced.html","/v1.7.0/dsf-for-dev.html","/v1.7.0/","/v1.7.0/process-plugins-advanced.html","/about/learnmore/","/about/learnmore/contact.html","/about/learnmore/partners.html","/about/learnmore/public.html","/about/learnmore/team.html","/intro/info/allowList.html","/intro/info/architecture.html","/intro/info/basics.html","/intro/info/introduction.html","/intro/info/networkSetup.html","/intro/i
nfo/process-plugins.html","/intro/info/security.html","/intro/tutorials/GMDS2022-dev.html","/intro/tutorials/MIE2023.html","/intro/tutorials/","/intro/tutorials/Talks.html","/intro/use-cases/","/intro/use-cases/feasibility.html","/intro/use-cases/num.html","/oldstable/build/","/oldstable/build/build.html","/oldstable/build/proxyTestTool.html","/oldstable/build/releaseANewVersion.html","/oldstable/code/","/oldstable/code/addingANewFhirR.html","/oldstable/code/addingANewMpiClient.html","/oldstable/code/addingANewOpenEhrClient.html","/oldstable/code/changingBpmnProcesses.html","/oldstable/code/code.html","/oldstable/code/eclipseContent.html","/oldstable/code/intelliJContent.html","/oldstable/code/libraries.html","/oldstable/code/usingTheGitHubMaven.html","/oldstable/generalinformation/","/oldstable/generalinformation/authentication.html","/oldstable/generalinformation/networkSetup.html","/oldstable/releases/","/oldstable/releases/configBpe.html","/oldstable/releases/configFhir.html","/oldstable/releases/configFhirReverseProxy.html","/oldstable/releases/highmedInstall.html","/oldstable/releases/num-codexInstall.html","/oldstable/releases/upgradeFrom7.html","/oldstable/releases/upgradeFrom8.html","/oldstable/releases/upgradeFrom90.html","/oldstable/releases/upgradeFrom91.html","/oldstable/releases/upgradeFrom92.html","/oldstable/tutorial/","/oldstable/tutorial/ex11-docker-composeyml.html","/oldstable/tutorial/exercise1-simpleProcess.html","/oldstable/tutorial/exercise11-processDebugging.html","/oldstable/tutorial/exercise2-inputParameters.html","/oldstable/tutorial/exercise3-messageEvents.html","/oldstable/tutorial/exercise4-exclusiveGateways.html","/oldstable/tutorial/exercise5-eventBasedGateways.html","/oldstable/tutorial/prerequisites.html","/stable/contribute/code.html","/stable/contribute/documentation.html","/stable/contribute/","/stable/develop/","/stable/develop/create.html","/stable/develop/upgrade-from-0.html","/stable/maintain/","/stable/maintain/allowList-mgm
.html","/stable/maintain/install-plugins.html","/stable/maintain/install.html","/stable/maintain/passwords-secrets.html","/stable/maintain/root-certificates.html","/stable/maintain/upgrade-from-0.html","/stable/maintain/upgrade-from-1.html","/v1.0.0/develop/","/v1.0.0/develop/create.html","/v1.0.0/develop/upgrade-from-0.html","/v1.0.0/maintain/","/v1.0.0/maintain/allowList-mgm.html","/v1.0.0/maintain/install.html","/v1.0.0/maintain/upgrade-from-0.html","/v1.1.0/develop/","/v1.1.0/develop/create.html","/v1.1.0/develop/upgrade-from-0.html","/v1.1.0/maintain/","/v1.1.0/maintain/allowList-mgm.html","/v1.1.0/maintain/install.html","/v1.1.0/maintain/upgrade-from-0.html","/v1.1.0/maintain/upgrade-from-1.html","/v1.2.0/develop/","/v1.2.0/develop/create.html","/v1.2.0/develop/upgrade-from-0.html","/v1.2.0/maintain/","/v1.2.0/maintain/allowList-mgm.html","/v1.2.0/maintain/install.html","/v1.2.0/maintain/upgrade-from-0.html","/v1.2.0/maintain/upgrade-from-1.html","/v1.3.0/develop/","/v1.3.0/develop/create.html","/v1.3.0/develop/upgrade-from-0.html","/v1.3.0/maintain/","/v1.3.0/maintain/allowList-mgm.html","/v1.3.0/maintain/install-plugins.html","/v1.3.0/maintain/install.html","/v1.3.0/maintain/upgrade-from-0.html","/v1.3.0/maintain/upgrade-from-1.html","/v1.3.1/develop/","/v1.3.1/develop/create.html","/v1.3.1/develop/upgrade-from-0.html","/v1.3.1/maintain/","/v1.3.1/maintain/allowList-mgm.html","/v1.3.1/maintain/install-plugins.html","/v1.3.1/maintain/install.html","/v1.3.1/maintain/upgrade-from-0.html","/v1.3.1/maintain/upgrade-from-1.html","/v1.3.2/develop/","/v1.3.2/develop/create.html","/v1.3.2/develop/upgrade-from-0.html","/v1.3.2/maintain/","/v1.3.2/maintain/allowList-mgm.html","/v1.3.2/maintain/install-plugins.html","/v1.3.2/maintain/install.html","/v1.3.2/maintain/upgrade-from-0.html","/v1.3.2/maintain/upgrade-from-1.html","/v1.4.0/contribute/code.html","/v1.4.0/contribute/documentation.html","/v1.4.0/contribute/","/v1.4.0/develop/","/v1.4.0/develop/create.html","/v1.4
.0/develop/upgrade-from-0.html","/v1.4.0/maintain/","/v1.4.0/maintain/allowList-mgm.html","/v1.4.0/maintain/install-plugins.html","/v1.4.0/maintain/install.html","/v1.4.0/maintain/upgrade-from-0.html","/v1.4.0/maintain/upgrade-from-1.html","/v1.5.0/contribute/code.html","/v1.5.0/contribute/documentation.html","/v1.5.0/contribute/","/v1.5.0/develop/","/v1.5.0/develop/create.html","/v1.5.0/develop/upgrade-from-0.html","/v1.5.0/maintain/","/v1.5.0/maintain/allowList-mgm.html","/v1.5.0/maintain/install-plugins.html","/v1.5.0/maintain/install.html","/v1.5.0/maintain/upgrade-from-0.html","/v1.5.0/maintain/upgrade-from-1.html","/v1.5.1/contribute/code.html","/v1.5.1/contribute/documentation.html","/v1.5.1/contribute/","/v1.5.1/develop/","/v1.5.1/develop/create.html","/v1.5.1/develop/upgrade-from-0.html","/v1.5.1/maintain/","/v1.5.1/maintain/allowList-mgm.html","/v1.5.1/maintain/install-plugins.html","/v1.5.1/maintain/install.html","/v1.5.1/maintain/upgrade-from-0.html","/v1.5.1/maintain/upgrade-from-1.html","/v1.5.2/contribute/code.html","/v1.5.2/contribute/documentation.html","/v1.5.2/contribute/","/v1.5.2/develop/","/v1.5.2/develop/create.html","/v1.5.2/develop/upgrade-from-0.html","/v1.5.2/maintain/","/v1.5.2/maintain/allowList-mgm.html","/v1.5.2/maintain/install-plugins.html","/v1.5.2/maintain/install.html","/v1.5.2/maintain/upgrade-from-0.html","/v1.5.2/maintain/upgrade-from-1.html","/v1.6.0/contribute/code.html","/v1.6.0/contribute/documentation.html","/v1.6.0/contribute/","/v1.6.0/develop/","/v1.6.0/develop/create.html","/v1.6.0/develop/upgrade-from-0.html","/v1.6.0/maintain/","/v1.6.0/maintain/allowList-mgm.html","/v1.6.0/maintain/install-plugins.html","/v1.6.0/maintain/install.html","/v1.6.0/maintain/upgrade-from-0.html","/v1.6.0/maintain/upgrade-from-1.html","/v1.7.0/contribute/code.html","/v1.7.0/contribute/documentation.html","/v1.7.0/contribute/","/v1.7.0/develop/","/v1.7.0/develop/create.html","/v1.7.0/develop/upgrade-from-0.html","/v1.7.0/maintain/","/v1.7.0
/maintain/allowList-mgm.html","/v1.7.0/maintain/install-plugins.html","/v1.7.0/maintain/install.html","/v1.7.0/maintain/passwords-secrets.html","/v1.7.0/maintain/root-certificates.html","/v1.7.0/maintain/upgrade-from-0.html","/v1.7.0/maintain/upgrade-from-1.html","/stable/maintain/bpe/","/stable/maintain/bpe/access-control.html","/stable/maintain/bpe/configuration.html","/stable/maintain/bpe/oidc.html","/stable/maintain/bpe-reverse-proxy/","/stable/maintain/bpe-reverse-proxy/configuration.html","/stable/maintain/fhir/","/stable/maintain/fhir/access-control.html","/stable/maintain/fhir/configuration.html","/stable/maintain/fhir/oidc.html","/stable/maintain/fhir-reverse-proxy/","/stable/maintain/fhir-reverse-proxy/configuration.html","/v1.0.0/maintain/configuration/","/v1.0.0/maintain/configuration/bpe.html","/v1.0.0/maintain/configuration/common.html","/v1.0.0/maintain/configuration/fhir.html","/v1.0.0/maintain/configuration/reverseproxy.html","/v1.1.0/maintain/bpe/","/v1.1.0/maintain/bpe/configuration.html","/v1.1.0/maintain/fhir/","/v1.1.0/maintain/fhir/access-control.html","/v1.1.0/maintain/fhir/configuration.html","/v1.1.0/maintain/fhir/oidc.html","/v1.1.0/maintain/fhir-reverse-proxy/","/v1.1.0/maintain/fhir-reverse-proxy/configuration.html","/v1.2.0/maintain/bpe/","/v1.2.0/maintain/bpe/configuration.html","/v1.2.0/maintain/fhir/","/v1.2.0/maintain/fhir/access-control.html","/v1.2.0/maintain/fhir/configuration.html","/v1.2.0/maintain/fhir/oidc.html","/v1.2.0/maintain/fhir-reverse-proxy/","/v1.2.0/maintain/fhir-reverse-proxy/configuration.html","/v1.3.0/maintain/bpe/","/v1.3.0/maintain/bpe/configuration.html","/v1.3.0/maintain/fhir/","/v1.3.0/maintain/fhir/access-control.html","/v1.3.0/maintain/fhir/configuration.html","/v1.3.0/maintain/fhir/oidc.html","/v1.3.0/maintain/fhir-reverse-proxy/","/v1.3.0/maintain/fhir-reverse-proxy/configuration.html","/v1.3.1/maintain/bpe/","/v1.3.1/maintain/bpe/configuration.html","/v1.3.1/maintain/fhir/","/v1.3.1/maintain/fhir/acces
s-control.html","/v1.3.1/maintain/fhir/configuration.html","/v1.3.1/maintain/fhir/oidc.html","/v1.3.1/maintain/fhir-reverse-proxy/","/v1.3.1/maintain/fhir-reverse-proxy/configuration.html","/v1.3.2/maintain/bpe/","/v1.3.2/maintain/bpe/configuration.html","/v1.3.2/maintain/fhir/","/v1.3.2/maintain/fhir/access-control.html","/v1.3.2/maintain/fhir/configuration.html","/v1.3.2/maintain/fhir/oidc.html","/v1.3.2/maintain/fhir-reverse-proxy/","/v1.3.2/maintain/fhir-reverse-proxy/configuration.html","/v1.4.0/maintain/bpe/","/v1.4.0/maintain/bpe/configuration.html","/v1.4.0/maintain/fhir/","/v1.4.0/maintain/fhir/access-control.html","/v1.4.0/maintain/fhir/configuration.html","/v1.4.0/maintain/fhir/oidc.html","/v1.4.0/maintain/fhir-reverse-proxy/","/v1.4.0/maintain/fhir-reverse-proxy/configuration.html","/v1.5.0/maintain/bpe/","/v1.5.0/maintain/bpe/access-control.html","/v1.5.0/maintain/bpe/configuration.html","/v1.5.0/maintain/bpe/oidc.html","/v1.5.0/maintain/bpe-reverse-proxy/","/v1.5.0/maintain/bpe-reverse-proxy/configuration.html","/v1.5.0/maintain/fhir/","/v1.5.0/maintain/fhir/access-control.html","/v1.5.0/maintain/fhir/configuration.html","/v1.5.0/maintain/fhir/oidc.html","/v1.5.0/maintain/fhir-reverse-proxy/","/v1.5.0/maintain/fhir-reverse-proxy/configuration.html","/v1.5.1/maintain/bpe/","/v1.5.1/maintain/bpe/access-control.html","/v1.5.1/maintain/bpe/configuration.html","/v1.5.1/maintain/bpe/oidc.html","/v1.5.1/maintain/bpe-reverse-proxy/","/v1.5.1/maintain/bpe-reverse-proxy/configuration.html","/v1.5.1/maintain/fhir/","/v1.5.1/maintain/fhir/access-control.html","/v1.5.1/maintain/fhir/configuration.html","/v1.5.1/maintain/fhir/oidc.html","/v1.5.1/maintain/fhir-reverse-proxy/","/v1.5.1/maintain/fhir-reverse-proxy/configuration.html","/v1.5.2/maintain/bpe/","/v1.5.2/maintain/bpe/access-control.html","/v1.5.2/maintain/bpe/configuration.html","/v1.5.2/maintain/bpe/oidc.html","/v1.5.2/maintain/bpe-reverse-proxy/","/v1.5.2/maintain/bpe-reverse-proxy/configuration.html","/v
1.5.2/maintain/fhir/","/v1.5.2/maintain/fhir/access-control.html","/v1.5.2/maintain/fhir/configuration.html","/v1.5.2/maintain/fhir/oidc.html","/v1.5.2/maintain/fhir-reverse-proxy/","/v1.5.2/maintain/fhir-reverse-proxy/configuration.html","/v1.6.0/maintain/bpe/","/v1.6.0/maintain/bpe/access-control.html","/v1.6.0/maintain/bpe/configuration.html","/v1.6.0/maintain/bpe/oidc.html","/v1.6.0/maintain/bpe-reverse-proxy/","/v1.6.0/maintain/bpe-reverse-proxy/configuration.html","/v1.6.0/maintain/fhir/","/v1.6.0/maintain/fhir/access-control.html","/v1.6.0/maintain/fhir/configuration.html","/v1.6.0/maintain/fhir/oidc.html","/v1.6.0/maintain/fhir-reverse-proxy/","/v1.6.0/maintain/fhir-reverse-proxy/configuration.html","/v1.7.0/maintain/bpe/","/v1.7.0/maintain/bpe/access-control.html","/v1.7.0/maintain/bpe/configuration.html","/v1.7.0/maintain/bpe/oidc.html","/v1.7.0/maintain/bpe-reverse-proxy/","/v1.7.0/maintain/bpe-reverse-proxy/configuration.html","/v1.7.0/maintain/fhir/","/v1.7.0/maintain/fhir/access-control.html","/v1.7.0/maintain/fhir/configuration.html","/v1.7.0/maintain/fhir/oidc.html","/v1.7.0/maintain/fhir-reverse-proxy/","/v1.7.0/maintain/fhir-reverse-proxy/configuration.html","/404.html","/intro/info/"],Lt="SEARCH_PRO_QUERY_HISTORY",g=G(Lt,[]),kt=()=>{const{queryHistoryCount:i}=R,n=i>0;return{enabled:n,queryHistory:g,addQueryHistory:l=>{n&&(g.value=Array.from(new Set([l,...g.value.slice(0,i-1)])))},removeQueryHistory:l=>{g.value=[...g.value.slice(0,l),...g.value.slice(l+1)]}}},P=i=>Ht[i.id]+("anchor"in i?`#${i.anchor}`:""),Rt="SEARCH_PRO_RESULT_HISTORY",{resultHistoryCount:B}=R,b=G(Rt,[]),Qt=()=>{const i=B>0;return{enabled:i,resultHistory:b,addResultHistory:n=>{if(i){const l={link:P(n),display:n.display};"header"in n&&(l.header=n.header),b.value=[l,...b.value.slice(0,B-1)]}},removeResultHistory:n=>{b.value=[...b.value.slice(0,n),...b.value.slice(n+1)]}}},qt=i=>{const n=ut(),l=_(),Q=ct(),o=E(0),w=k(()=>o.value>0),d=dt([]);return 
pt(()=>{const{search:p,terminate:q}=ft(),y=xt(v=>{const x=v.join(" "),{searchFilter:C=c=>c,splitWord:S,suggestionsFilter:T,...f}=n.value;x?(o.value+=1,p(v.join(" "),l.value,f).then(c=>C(c,x,l.value,Q.value)).then(c=>{o.value-=1,d.value=c}).catch(c=>{console.warn(c),o.value-=1,o.value||(d.value=[])})):d.value=[]},R.searchDelay-R.suggestDelay);N([i,l],([v])=>y(v),{immediate:!0}),gt(()=>{q()})}),{isSearching:w,results:d}};var St=et({name:"SearchResult",props:{queries:{type:Array,required:!0},isFocusing:Boolean},emits:["close","updateQuery"],setup(i,{emit:n}){const l=at(),Q=_(),o=it(nt),{enabled:w,addQueryHistory:d,queryHistory:p,removeQueryHistory:q}=kt(),{enabled:y,resultHistory:v,addResultHistory:x,removeResultHistory:C}=Qt(),S=w||y,T=lt(i,"queries"),{results:f,isSearching:c}=qt(T),r=rt({isQuery:!0,index:0}),h=E(0),u=E(0),I=k(()=>S&&(p.value.length>0||v.value.length>0)),F=k(()=>f.value.length>0),A=k(()=>f.value[h.value]||null),U=()=>{const{isQuery:t,index:e}=r;e===0?(r.isQuery=!t,r.index=t?v.value.length-1:p.value.length-1):r.index=e-1},Y=()=>{const{isQuery:t,index:e}=r;e===(t?p.value.length-1:v.value.length-1)?(r.isQuery=!t,r.index=0):r.index=e+1},J=()=>{h.value=h.value>0?h.value-1:f.value.length-1,u.value=A.value.contents.length-1},V=()=>{h.value=h.valueRedirect to this webpage.
\\n\\n"}');export{f as comp,b as data}; diff --git a/assets/access-control.html-4BKEhf1P.js b/assets/access-control.html-4BKEhf1P.js new file mode 100644 index 000000000..d7c172469 --- /dev/null +++ b/assets/access-control.html-4BKEhf1P.js @@ -0,0 +1,39 @@ +import{_ as i}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as s,a,o as n}from"./app-BIWb5uIp.js";const t={};function l(r,e){return n(),s("div",null,e[0]||(e[0]=[a(`The DSF FHIR server implements a subset of the FHIR R4 REST API. When accessing the API with a web browser a limited graphical user interface is shown. Without any additional configuration the API and user interface is only accessible with the X.509 client certificate configured for the organization via the configuration parameter: DEV_DSF_FHIR_SERVER_ORGANIZATION_THUMBPRINT
OpenID Connect
To enable OpenID Connect authentication of local user, see the DSF FHIR server OpenID Connect configuration page.
Access to the API and user interface can be enabled for additional client certificates and local users authenticating via OAuth 2.0 OpenID Connect. Access can be configured for so called roles, with all roles specified using the configuration parameter DEV_DSF_FHIR_SERVER_ROLECONFIG. The value for this environment variable is specified as YAML using the block scalar |
.
The listing below shows a minimal configuration to enable read access for a specific client-certificate:
DEV_DSF_FHIR_SERVER_ROLECONFIG: |
+ - example_read_only_role:
+ thumbprint: 00474993fa261b0225f93c5a66aa6fcc... [a-f0-9]{128}
+ dsf-role:
+ - READ
+ - SEARCH
+ - HISTORY
The list of user roles above contains a single rule-entry example_read_only_role
, matching the user via a client certificate SHA-512 thumprint and assigning three DSF roles. Any string can be used as the name for the rule-enty.
Certificate Thumbprints
SHA-512 certificate thumbprints in HEX form [a-f0-9]{128}
can be calculated using:
certtool --fingerprint --hash=sha512 --infile=certificate.pem
Multiple user roles can be specified and all matching roles will be applied to an authenticated users. Use an empty string ""
or a single block scalar |
character as the value for the configuration parameter DEV_DSF_FHIR_SERVER_ROLECONFIG if no roles should be configured.
To apply roles, users can be matched via the thumbprint
, email
, token-role
or token-group
properties. A single value or a list of values can be specified.
The property thumbprint
can used to specify one or multiple SHA-512 certificate thumbprints. Roles from this rule are applied to the authenticating user if the certificate matches one of the specified thumbprints.
Using the property email
users can be matched against e-mail addresses specified in X.509 client certificates and in OpenID Connect access tokens. Values will be matched against e-mail addresses specified in the subject DN (via PKCS#9 extension 1.2.840.113549.1.9.1) and RFC-822 Name entries of the Subject Alternative Name field. If the user authenticates via OpenID Connect, the email
claim from the access token will be matched against the property values.
With the properties token-role
and token-group
role and group names can be specified to match against role and group claims within OAuth 2.0 access tokens.
Two types of roles can be applied to matched users.
DSF roles specified via the dsf-role
property define general access to the REST API and user interface. Allowed values are:
CREATE
, READ
, UPDATE
, DELETE
, SEARCH
, HISTORY
, PERMANENT_DELETE
and WEBSOCKET
.
In order to allow users to start processes, the property practitioner-role
can be used to assign codes from FHIR CodeSystem resources. Codes are specified in the form system-url|code
.
If the uses has a code specified here that match with a requester
extension within the process plugin's ActivityDefinition resource, the user can start the process if he also has the dsf-role
CREATE
.
Process plugins can defined and use there own code-systems. However, the DSF specifies a standard set of practitioner roles within the CodeSystem http://dsf.dev/fhir/CodeSystem/practitioner-role
:
UAC_USER
, COS_USER
, CRR_USER
, DIC_USER
, DMS_USER
, DTS_USER
, HRP_USER
, TTP_USER
, AMS_USER
and DSF_ADMIN
.
The first example defines a group of DSF administrators. Two client certificates match against this role:
DEV_DSF_FHIR_SERVER_ROLECONFIG: |
+ - certificate-admins:
+ thumbprint:
+ - afb68b1d9d47e691b8b3d50fd9848467cada8b1c76f5f4b45f00c9f8432d505361a3ee27805f4aa06799d9ac8dace94b3f1942fce44d84866961259b13be825d
+ - 2441bfddcad97eeb83c8c31fe181b90652787b8b59bf4e569219da7db4429e389479cb7c4a2f311e34217357d594ecad7d58ccfeef2a9e93c6fcf8d98897d88c
+ dsf-role:
+ - CREATE
+ - READ
+ - UPDATE
+ - DELETE
+ - SEARCH
+ - HISTORY
+ practitioner-role:
+ - http://dsf.dev/fhir/CodeSystem/practitioner-role|DSF_ADMIN
The second example defines a group of DSF administrators by specifying an admin
role that gets matched against OAuth 2.0 access tokens:
DEV_DSF_FHIR_SERVER_ROLECONFIG: |
+ - token-role-admins:
+ token-role: admin
+ dsf-role:
+ - CREATE
+ - READ
+ - UPDATE
+ - DELETE
+ - SEARCH
+ - HISTORY
+ practitioner-role:
+ - http://dsf.dev/fhir/CodeSystem/practitioner-role|DSF_ADMIN
The third example allows read-only access. Two e-mail addresses are used to match this role. E-mail addresses from X.509 client certificates and OAuth 2.0 access tokens are matched:
DEV_DSF_FHIR_SERVER_ROLECONFIG: |
+ - read-only:
+ email:
+ - first.user@test.org
+ - second.user@test.org
+ dsf-role:
+ - READ
+ - SEARCH
+ - HISTORY
The DSF FHIR server implements a subset of the FHIR R4 REST API. When accessing the API with a web browser a limited graphical user interface is shown. Without any additional configuration the API and user interface is only accessible with the X.509 client certificate configured for the organization via the configuration parameter: DEV_DSF_FHIR_SERVER_ORGANIZATION_THUMBPRINT
"}');export{h as comp,o as data}; diff --git a/assets/access-control.html-B60MdMg4.js b/assets/access-control.html-B60MdMg4.js new file mode 100644 index 000000000..efe185904 --- /dev/null +++ b/assets/access-control.html-B60MdMg4.js @@ -0,0 +1,39 @@ +import{_ as i}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as s,a,o as n}from"./app-BIWb5uIp.js";const t={};function l(r,e){return n(),s("div",null,e[0]||(e[0]=[a(`The DSF FHIR server implements a subset of the FHIR R4 REST API. When accessing the API with a web browser a limited graphical user interface is shown. Without any additional configuration the API and user interface is only accessible with the X.509 client certificate configured for the organization via the configuration parameter: DEV_DSF_FHIR_SERVER_ORGANIZATION_THUMBPRINT
OpenID Connect
To enable OpenID Connect authentication of local user, see the DSF FHIR server OpenID Connect configuration page.
Access to the API and user interface can be enabled for additional client certificates and local users authenticating via OAuth 2.0 OpenID Connect. Access can be configured for so called roles, with all roles specified using the configuration parameter DEV_DSF_FHIR_SERVER_ROLECONFIG. The value for this environment variable is specified as YAML using the block scalar |
.
The listing below shows a minimal configuration to enable read access for a specific client-certificate:
DEV_DSF_FHIR_SERVER_ROLECONFIG: |
+ - example_read_only_role:
+ thumbprint: 00474993fa261b0225f93c5a66aa6fcc... [a-f0-9]{128}
+ dsf-role:
+ - READ
+ - SEARCH
+ - HISTORY
The list of user roles above contains a single entry example_read_only_role
, matching the user via a client certificate SHA-512 thumprint and assigning three DSF roles.
Certificate Thumbprints
SHA-512 certificate thumbprints in HEX form [a-f0-9]{128}
can be calculated using:
certtool --fingerprint --hash=sha512 --infile=certificate.pem
Multiple user roles can be specified and all matching roles will be applied to an authenticated users. Use an empty string ""
or a single block scalar |
character as the value for the configuration parameter DEV_DSF_FHIR_SERVER_ROLECONFIG if no roles should be configured.
To apply roles, users can be matched via the thumbprint
, email
, token-role
or token-group
properties. A single value or a list of values can be specified.
The property thumbprint
can used to specify one or multiple SHA-512 certificate thumbprints. Roles from this rule are applied to the authenticating user if the certificate matches one of the specified thumbprints.
Using the property email
users can be matched against e-mail addresses specified in X.509 client certificates and in OpenID Connect access tokens. Values will be matched against e-mail addresses specified in the subject DN (via PKCS#9 extension 1.2.840.113549.1.9.1) and RFC-822 Name entries of the Subject Alternative Name field. If the user authenticates via OpenID Connect, the email
claim from the access token will be matched against the property values.
With the properties token-role
and token-group
role and group names can be specified to match against role and group claims within OAuth 2.0 access tokens.
To types of roles can be applied to matched users.
DSF roles specified via the dsf-role
property define general access to the REST API and user interface. Allowed values are:
CREATE
, READ
, UPDATE
, DELETE
, SEARCH
, HISTORY
, PERMANENT_DELETE
and WEBSOCKET
.
In order to allow users to start processes, the property practitioner-role
can be used to assign codes from FHIR CodeSystem resources. Codes are specified in the form system-url|code
.
If the uses has a code specified here that match with a requester
extension within the process plugin's ActivityDefinition resource, the user can start the process if he also has the dsf-role
CREATE
.
Process plugins can defined and use there own code-systems. However, the DSF specifies a standard set of practitioner roles within the CodeSystem http://dsf.dev/fhir/CodeSystem/practitioner-role
:
UAC_USER
, COS_USER
, CRR_USER
, DIC_USER
, DMS_USER
, DTS_USER
, HRP_USER
, TTP_USER
, AMS_USER
and DSF_ADMIN
.
The first example defines a group of DSF administrators. Two client certificates match against this role:
DEV_DSF_FHIR_SERVER_ROLECONFIG: |
+ - certificate-admins:
+ thumbprint:
+ - afb68b1d9d47e691b8b3d50fd9848467cada8b1c76f5f4b45f00c9f8432d505361a3ee27805f4aa06799d9ac8dace94b3f1942fce44d84866961259b13be825d
+ - 2441bfddcad97eeb83c8c31fe181b90652787b8b59bf4e569219da7db4429e389479cb7c4a2f311e34217357d594ecad7d58ccfeef2a9e93c6fcf8d98897d88c
+ dsf-role:
+ - CREATE
+ - READ
+ - UPDATE
+ - DELETE
+ - SEARCH
+ - HISTORY
+ practitioner-role:
+ - http://dsf.dev/fhir/CodeSystem/practitioner-role|DSF_ADMIN
The second example defines a group of DSF administrators by specifying an admin
role that gets matched against OAuth 2.0 access tokens:
DEV_DSF_FHIR_SERVER_ROLECONFIG: |
+ - token-role-admins:
+ token-role: admin
+ dsf-role:
+ - CREATE
+ - READ
+ - UPDATE
+ - DELETE
+ - SEARCH
+ - HISTORY
+ practitioner-role:
+ - http://dsf.dev/fhir/CodeSystem/practitioner-role|DSF_ADMIN
The third example allows read-only access. Two e-mail addresses are used to match this role. E-mail addresses from X.509 client certificates and OAuth 2.0 access tokens are matched:
DEV_DSF_FHIR_SERVER_ROLECONFIG: |
+ - read-only:
+ email:
+ - first.user@test.org
+ - second.user@test.org
+ dsf-role:
+ - READ
+ - SEARCH
+ - HISTORY
The DSF FHIR server implements a subset of the FHIR R4 REST API. When accessing the API with a web browser a limited graphical user interface is shown. Without any additional configuration the API and user interface is only accessible with the X.509 client certificate configured for the organization via the configuration parameter: DEV_DSF_FHIR_SERVER_ORGANIZATION_THUMBPRINT
"}');export{h as comp,o as data}; diff --git a/assets/access-control.html-CCsuodqK.js b/assets/access-control.html-CCsuodqK.js new file mode 100644 index 000000000..fb81151da --- /dev/null +++ b/assets/access-control.html-CCsuodqK.js @@ -0,0 +1,39 @@ +import{_ as i}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as s,a,o as n}from"./app-BIWb5uIp.js";const t={};function l(r,e){return n(),s("div",null,e[0]||(e[0]=[a(`The DSF FHIR server implements a subset of the FHIR R4 REST API. When accessing the API with a web browser a limited graphical user interface is shown. Without any additional configuration the API and user interface is only accessible with the X.509 client certificate configured for the organization via the configuration parameter: DEV_DSF_FHIR_SERVER_ORGANIZATION_THUMBPRINT
OpenID Connect
To enable OpenID Connect authentication of local user, see the DSF FHIR server OpenID Connect configuration page.
Access to the API and user interface can be enabled for additional client certificates and local users authenticating via OAuth 2.0 OpenID Connect. Access can be configured for so called roles, with all roles specified using the configuration parameter DEV_DSF_FHIR_SERVER_ROLECONFIG. The value for this environment variable is specified as YAML using the block scalar |
.
The listing below shows a minimal configuration to enable read access for a specific client-certificate:
DEV_DSF_FHIR_SERVER_ROLECONFIG: |
+ - example_read_only_role:
+ thumbprint: 00474993fa261b0225f93c5a66aa6fcc... [a-f0-9]{128}
+ dsf-role:
+ - READ
+ - SEARCH
+ - HISTORY
The list of user roles above contains a single entry example_read_only_role
, matching the user via a client certificate SHA-512 thumprint and assigning three DSF roles.
Certificate Thumbprints
SHA-512 certificate thumbprints in HEX form [a-f0-9]{128}
can be calculated using:
certtool --fingerprint --hash=sha512 --infile=certificate.pem
Multiple user roles can be specified and all matching roles will be applied to an authenticated users. Use an empty string ""
or a single block scalar |
character as the value for the configuration parameter DEV_DSF_FHIR_SERVER_ROLECONFIG if no roles should be configured.
To apply roles, users can be matched via the thumbprint
, email
, token-role
or token-group
properties. A single value or a list of values can be specified.
The property thumbprint
can used to specify one or multiple SHA-512 certificate thumbprints. Roles from this rule are applied to the authenticating user if the certificate matches one of the specified thumbprints.
Using the property email
users can be matched against e-mail addresses specified in X.509 client certificates and in OpenID Connect access tokens. Values will be matched against e-mail addresses specified in the subject DN (via PKCS#9 extension 1.2.840.113549.1.9.1) and RFC-822 Name entries of the Subject Alternative Name field. If the user authenticates via OpenID Connect, the email
claim from the access token will be matched against the property values.
With the properties token-role
and token-group
role and group names can be specified to match against role and group claims within OAuth 2.0 access tokens.
To types of roles can be applied to matched users.
DSF roles specified via the dsf-role
property define general access to the REST API and user interface. Allowed values are:
CREATE
, READ
, UPDATE
, DELETE
, SEARCH
, HISTORY
, PERMANENT_DELETE
and WEBSOCKET
.
In order to allow users to start processes, the property practitioner-role
can be used to assign codes from FHIR CodeSystem resources. Codes are specified in the form system-url|code
.
If the uses has a code specified here that match with a requester
extension within the process plugin's ActivityDefinition resource, the user can start the process if he also has the dsf-role
CREATE
.
Process plugins can defined and use there own code-systems. However, the DSF specifies a standard set of practitioner roles within the CodeSystem http://dsf.dev/fhir/CodeSystem/practitioner-role
:
UAC_USER
, COS_USER
, CRR_USER
, DIC_USER
, DMS_USER
, DTS_USER
, HRP_USER
, TTP_USER
, AMS_USER
and DSF_ADMIN
.
The first example defines a group of DSF administrators. Two client certificates match against this role:
DEV_DSF_FHIR_SERVER_ROLECONFIG: |
+ - certificate-admins:
+ thumbprint:
+ - afb68b1d9d47e691b8b3d50fd9848467cada8b1c76f5f4b45f00c9f8432d505361a3ee27805f4aa06799d9ac8dace94b3f1942fce44d84866961259b13be825d
+ - 2441bfddcad97eeb83c8c31fe181b90652787b8b59bf4e569219da7db4429e389479cb7c4a2f311e34217357d594ecad7d58ccfeef2a9e93c6fcf8d98897d88c
+ dsf-role:
+ - CREATE
+ - READ
+ - UPDATE
+ - DELETE
+ - SEARCH
+ - HISTORY
+ practitioner-role:
+ - http://dsf.dev/fhir/CodeSystem/practitioner-role|DSF_ADMIN
The second example defines a group of DSF administrators by specifying an admin
role that gets matched against OAuth 2.0 access tokens:
DEV_DSF_FHIR_SERVER_ROLECONFIG: |
+ - token-role-admins:
+ token-role: admin
+ dsf-role:
+ - CREATE
+ - READ
+ - UPDATE
+ - DELETE
+ - SEARCH
+ - HISTORY
+ practitioner-role:
+ - http://dsf.dev/fhir/CodeSystem/practitioner-role|DSF_ADMIN
The third example allows read-only access. Two e-mail addresses are used to match this role. E-mail addresses from X.509 client certificates and OAuth 2.0 access tokens are matched:
DEV_DSF_FHIR_SERVER_ROLECONFIG: |
+ - read-only:
+ email:
+ - first.user@test.org
+ - second.user@test.org
+ dsf-role:
+ - READ
+ - SEARCH
+ - HISTORY
The DSF FHIR server implements a subset of the FHIR R4 REST API. When accessing the API with a web browser a limited graphical user interface is shown. Without any additional configuration the API and user interface is only accessible with the X.509 client certificate configured for the organization via the configuration parameter: DEV_DSF_FHIR_SERVER_ORGANIZATION_THUMBPRINT
"}');export{h as comp,o as data}; diff --git a/assets/access-control.html-CJEaNEvV.js b/assets/access-control.html-CJEaNEvV.js new file mode 100644 index 000000000..bdd633636 --- /dev/null +++ b/assets/access-control.html-CJEaNEvV.js @@ -0,0 +1,21 @@ +import{_ as i}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as s,a,o as n}from"./app-BIWb5uIp.js";const t={};function l(r,e){return n(),s("div",null,e[0]||(e[0]=[a(`The DSF BPE server provides a user interface for administrators. Without any additional configuration the user interface is not accessible with the organizations X.509 client certificate or any other certificate or OpenID Connect authenticated user.
OpenID Connect
To enable OpenID Connect authentication of local user, see the DSF BPE server OpenID Connect configuration page.
Access to the user interface can be enabled for client certificates and local users authenticating via OAuth 2.0 OpenID Connect. Access can be configured for so called roles, with all roles specified using the configuration parameter DEV_DSF_BPE_SERVER_ROLECONFIG. The value for this environment variable is specified as YAML using the block scalar |
.
The listing below shows a minimal configuration to enable access for a specific client-certificate:
DEV_DSF_BPE_SERVER_ROLECONFIG: |
+ - example_read_only_role:
+ thumbprint: 00474993fa261b0225f93c5a66aa6fcc... [a-f0-9]{128}
+ dsf-role:
+ - ADMIN
The list of user roles above contains a single rule-entry example_read_only_role
, matching the user via a client certificate SHA-512 thumprint and assigning three DSF roles. Any string can be used as the name for the rule-enty.
Certificate Thumbprints
SHA-512 certificate thumbprints in HEX form [a-f0-9]{128}
can be calculated using:
certtool --fingerprint --hash=sha512 --infile=certificate.pem
Multiple user roles can be specified and all matching roles will be applied to an authenticated users. Use an empty string ""
or a single block scalar |
character as the value for the configuration parameter DEV_DSF_BPE_SERVER_ROLECONFIG if no roles should be configured.
To apply roles, users can be matched via the thumbprint
, email
, token-role
or token-group
properties. A single value or a list of values can be specified.
The property thumbprint
can used to specify one or multiple SHA-512 certificate thumbprints. Roles from this rule are applied to the authenticating user if the certificate matches one of the specified thumbprints.
Using the property email
users can be matched against e-mail addresses specified in X.509 client certificates and in OpenID Connect access tokens. Values will be matched against e-mail addresses specified in the subject DN (via PKCS#9 extension 1.2.840.113549.1.9.1) and RFC-822 Name entries of the Subject Alternative Name field. If the user authenticates via OpenID Connect, the email
claim from the access token will be matched against the property values.
With the properties token-role
and token-group
role and group names can be specified to match against role and group claims within OAuth 2.0 access tokens.
Two types of roles can be applied to matched users.
DSF roles specified via the dsf-role
property define general access to the user interface. Allowed values are:
ADMIN
.
The BPE server currently does not support any practionier-roles.
The first example defines a group of DSF administrators. Two client certificates match against this role:
DEV_DSF_BPE_SERVER_ROLECONFIG: |
+ - certificate-admins:
+ thumbprint:
+ - afb68b1d9d47e691b8b3d50fd9848467cada8b1c76f5f4b45f00c9f8432d505361a3ee27805f4aa06799d9ac8dace94b3f1942fce44d84866961259b13be825d
+ - 2441bfddcad97eeb83c8c31fe181b90652787b8b59bf4e569219da7db4429e389479cb7c4a2f311e34217357d594ecad7d58ccfeef2a9e93c6fcf8d98897d88c
+ dsf-role:
+ - ADMIN
The second example defines a group of DSF administrators by specifying an admin
role that gets matched against OAuth 2.0 access tokens:
DEV_DSF_BPE_SERVER_ROLECONFIG: |
+ - token-role-admins:
+ token-role: admin
+ dsf-role:
+ - ADMIN
The third example allows administrator access and users e-mail addresses to match this role. E-mail addresses from X.509 client certificates and OAuth 2.0 access tokens are matched:
DEV_DSF_BPE_SERVER_ROLECONFIG: |
+ - email-admins:
+ email:
+ - first.user@test.org
+ - second.user@test.org
+ dsf-role:
+ - ADMIN
The DSF BPE server provides a user interface for administrators. Without any additional configuration the user interface is not accessible with the organizations X.509 client certificate or any other certificate or OpenID Connect authenticated user.
\\nOpenID Connect
\\nTo enable OpenID Connect authentication of local user, see the DSF BPE server OpenID Connect configuration page.
\\nThe DSF FHIR server implements a subset of the FHIR R4 REST API. When accessing the API with a web browser a limited graphical user interface is shown. Without any additional configuration the API and user interface is only accessible with the X.509 client certificate configured for the organization via the configuration parameter: DEV_DSF_FHIR_SERVER_ORGANIZATION_THUMBPRINT
OpenID Connect
To enable OpenID Connect authentication of local user, see the DSF FHIR server OpenID Connect configuration page.
Access to the API and user interface can be enabled for additional client certificates and local users authenticating via OAuth 2.0 OpenID Connect. Access can be configured for so called roles, with all roles specified using the configuration parameter DEV_DSF_FHIR_SERVER_ROLECONFIG. The value for this environment variable is specified as YAML using the block scalar |
.
The listing below shows a minimal configuration to enable read access for a specific client-certificate:
DEV_DSF_FHIR_SERVER_ROLECONFIG: |
+ - example_read_only_role:
+ thumbprint: 00474993fa261b0225f93c5a66aa6fcc... [a-f0-9]{128}
+ dsf-role:
+ - READ
+ - SEARCH
+ - HISTORY
The list of user roles above contains a single rule-entry example_read_only_role
, matching the user via a client certificate SHA-512 thumprint and assigning three DSF roles. Any string can be used as the name for the rule-enty.
Certificate Thumbprints
SHA-512 certificate thumbprints in HEX form [a-f0-9]{128}
can be calculated using:
certtool --fingerprint --hash=sha512 --infile=certificate.pem
Multiple user roles can be specified and all matching roles will be applied to an authenticated users. Use an empty string ""
or a single block scalar |
character as the value for the configuration parameter DEV_DSF_FHIR_SERVER_ROLECONFIG if no roles should be configured.
To apply roles, users can be matched via the thumbprint
, email
, token-role
or token-group
properties. A single value or a list of values can be specified.
The property thumbprint
can used to specify one or multiple SHA-512 certificate thumbprints. Roles from this rule are applied to the authenticating user if the certificate matches one of the specified thumbprints.
Using the property email
users can be matched against e-mail addresses specified in X.509 client certificates and in OpenID Connect access tokens. Values will be matched against e-mail addresses specified in the subject DN (via PKCS#9 extension 1.2.840.113549.1.9.1) and RFC-822 Name entries of the Subject Alternative Name field. If the user authenticates via OpenID Connect, the email
claim from the access token will be matched against the property values.
With the properties token-role
and token-group
role and group names can be specified to match against role and group claims within OAuth 2.0 access tokens.
Two types of roles can be applied to matched users.
DSF roles specified via the dsf-role
property define general access to the REST API and user interface. Allowed values are:
CREATE
, READ
, UPDATE
, DELETE
, SEARCH
, HISTORY
, PERMANENT_DELETE
and WEBSOCKET
.
In order to allow users to start processes, the property practitioner-role
can be used to assign codes from FHIR CodeSystem resources. Codes are specified in the form system-url|code
.
If the uses has a code specified here that match with a requester
extension within the process plugin's ActivityDefinition resource, the user can start the process if he also has the dsf-role
CREATE
.
Process plugins can defined and use there own code-systems. However, the DSF specifies a standard set of practitioner roles within the CodeSystem http://dsf.dev/fhir/CodeSystem/practitioner-role
:
UAC_USER
, COS_USER
, CRR_USER
, DIC_USER
, DMS_USER
, DTS_USER
, HRP_USER
, TTP_USER
, AMS_USER
and DSF_ADMIN
.
The first example defines a group of DSF administrators. Two client certificates match against this role:
DEV_DSF_FHIR_SERVER_ROLECONFIG: |
+ - certificate-admins:
+ thumbprint:
+ - afb68b1d9d47e691b8b3d50fd9848467cada8b1c76f5f4b45f00c9f8432d505361a3ee27805f4aa06799d9ac8dace94b3f1942fce44d84866961259b13be825d
+ - 2441bfddcad97eeb83c8c31fe181b90652787b8b59bf4e569219da7db4429e389479cb7c4a2f311e34217357d594ecad7d58ccfeef2a9e93c6fcf8d98897d88c
+ dsf-role:
+ - CREATE
+ - READ
+ - UPDATE
+ - DELETE
+ - SEARCH
+ - HISTORY
+ practitioner-role:
+ - http://dsf.dev/fhir/CodeSystem/practitioner-role|DSF_ADMIN
The second example defines a group of DSF administrators by specifying an admin
role that gets matched against OAuth 2.0 access tokens:
DEV_DSF_FHIR_SERVER_ROLECONFIG: |
+ - token-role-admins:
+ token-role: admin
+ dsf-role:
+ - CREATE
+ - READ
+ - UPDATE
+ - DELETE
+ - SEARCH
+ - HISTORY
+ practitioner-role:
+ - http://dsf.dev/fhir/CodeSystem/practitioner-role|DSF_ADMIN
The third example allows read-only access. Two e-mail addresses are used to match this role. E-mail addresses from X.509 client certificates and OAuth 2.0 access tokens are matched:
DEV_DSF_FHIR_SERVER_ROLECONFIG: |
+ - read-only:
+ email:
+ - first.user@test.org
+ - second.user@test.org
+ dsf-role:
+ - READ
+ - SEARCH
+ - HISTORY
The DSF FHIR server implements a subset of the FHIR R4 REST API. When accessing the API with a web browser a limited graphical user interface is shown. Without any additional configuration the API and user interface is only accessible with the X.509 client certificate configured for the organization via the configuration parameter: DEV_DSF_FHIR_SERVER_ORGANIZATION_THUMBPRINT
"}');export{h as comp,o as data}; diff --git a/assets/access-control.html-CP9FgeAe.js b/assets/access-control.html-CP9FgeAe.js new file mode 100644 index 000000000..5dcc3aee4 --- /dev/null +++ b/assets/access-control.html-CP9FgeAe.js @@ -0,0 +1,21 @@ +import{_ as i}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as s,a,o as n}from"./app-BIWb5uIp.js";const t={};function l(r,e){return n(),s("div",null,e[0]||(e[0]=[a(`The DSF BPE server provides a user interface for administrators. Without any additional configuration the user interface is not accessible with the organizations X.509 client certificate or any other certificate or OpenID Connect authenticated user.
OpenID Connect
To enable OpenID Connect authentication of local user, see the DSF BPE server OpenID Connect configuration page.
Access to the user interface can be enabled for client certificates and local users authenticating via OAuth 2.0 OpenID Connect. Access can be configured for so called roles, with all roles specified using the configuration parameter DEV_DSF_BPE_SERVER_ROLECONFIG. The value for this environment variable is specified as YAML using the block scalar |
.
The listing below shows a minimal configuration to enable access for a specific client-certificate:
DEV_DSF_BPE_SERVER_ROLECONFIG: |
+ - example_read_only_role:
+ thumbprint: 00474993fa261b0225f93c5a66aa6fcc... [a-f0-9]{128}
+ dsf-role:
+ - ADMIN
The list of user roles above contains a single rule-entry example_read_only_role
, matching the user via a client certificate SHA-512 thumprint and assigning three DSF roles. Any string can be used as the name for the rule-enty.
Certificate Thumbprints
SHA-512 certificate thumbprints in HEX form [a-f0-9]{128}
can be calculated using:
certtool --fingerprint --hash=sha512 --infile=certificate.pem
Multiple user roles can be specified and all matching roles will be applied to an authenticated users. Use an empty string ""
or a single block scalar |
character as the value for the configuration parameter DEV_DSF_BPE_SERVER_ROLECONFIG if no roles should be configured.
To apply roles, users can be matched via the thumbprint
, email
, token-role
or token-group
properties. A single value or a list of values can be specified.
The property thumbprint
can used to specify one or multiple SHA-512 certificate thumbprints. Roles from this rule are applied to the authenticating user if the certificate matches one of the specified thumbprints.
Using the property email
users can be matched against e-mail addresses specified in X.509 client certificates and in OpenID Connect access tokens. Values will be matched against e-mail addresses specified in the subject DN (via PKCS#9 extension 1.2.840.113549.1.9.1) and RFC-822 Name entries of the Subject Alternative Name field. If the user authenticates via OpenID Connect, the email
claim from the access token will be matched against the property values.
With the properties token-role
and token-group
role and group names can be specified to match against role and group claims within OAuth 2.0 access tokens.
Two types of roles can be applied to matched users.
DSF roles specified via the dsf-role
property define general access to the user interface. Allowed values are:
ADMIN
.
The BPE server currently does not support any practitioner-roles.
The first example defines a group of DSF administrators. Two client certificates match against this role:
DEV_DSF_BPE_SERVER_ROLECONFIG: |
+ - certificate-admins:
+ thumbprint:
+ - afb68b1d9d47e691b8b3d50fd9848467cada8b1c76f5f4b45f00c9f8432d505361a3ee27805f4aa06799d9ac8dace94b3f1942fce44d84866961259b13be825d
+ - 2441bfddcad97eeb83c8c31fe181b90652787b8b59bf4e569219da7db4429e389479cb7c4a2f311e34217357d594ecad7d58ccfeef2a9e93c6fcf8d98897d88c
+ dsf-role:
+ - ADMIN
The second example defines a group of DSF administrators by specifying an admin
role that gets matched against OAuth 2.0 access tokens:
DEV_DSF_BPE_SERVER_ROLECONFIG: |
+ - token-role-admins:
+ token-role: admin
+ dsf-role:
+ - ADMIN
The third example allows administrator access and users e-mail addresses to match this role. E-mail addresses from X.509 client certificates and OAuth 2.0 access tokens are matched:
DEV_DSF_BPE_SERVER_ROLECONFIG: |
+ - email-admins:
+ email:
+ - first.user@test.org
+ - second.user@test.org
+ dsf-role:
+ - ADMIN
The DSF BPE server provides a user interface for administrators. Without any additional configuration the user interface is not accessible with the organization's X.509 client certificate or any other certificate or OpenID Connect authenticated user.
\\nOpenID Connect
\nTo enable OpenID Connect authentication of local users, see the DSF BPE server OpenID Connect configuration page.
\\nThe DSF FHIR server implements a subset of the FHIR R4 REST API. When accessing the API with a web browser a limited graphical user interface is shown. Without any additional configuration the API and user interface is only accessible with the X.509 client certificate configured for the organization via the configuration parameter: DEV_DSF_FHIR_SERVER_ORGANIZATION_THUMBPRINT
OpenID Connect
To enable OpenID Connect authentication of local users, see the DSF FHIR server OpenID Connect configuration page.
Access to the API and user interface can be enabled for additional client certificates and local users authenticating via OAuth 2.0 OpenID Connect. Access can be configured for so called roles, with all roles specified using the configuration parameter DEV_DSF_FHIR_SERVER_ROLECONFIG. The value for this environment variable is specified as YAML using the block scalar |
.
The listing below shows a minimal configuration to enable read access for a specific client-certificate:
DEV_DSF_FHIR_SERVER_ROLECONFIG: |
+ - example_read_only_role:
+ thumbprint: 00474993fa261b0225f93c5a66aa6fcc... [a-f0-9]{128}
+ dsf-role:
+ - READ
+ - SEARCH
+ - HISTORY
The list of user roles above contains a single entry example_read_only_role
, matching the user via a client certificate SHA-512 thumbprint and assigning three DSF roles.
Certificate Thumbprints
SHA-512 certificate thumbprints in HEX form [a-f0-9]{128}
can be calculated using:
certtool --fingerprint --hash=sha512 --infile=certificate.pem
Multiple user roles can be specified and all matching roles will be applied to an authenticated user. Use an empty string ""
or a single block scalar |
character as the value for the configuration parameter DEV_DSF_FHIR_SERVER_ROLECONFIG if no roles should be configured.
To apply roles, users can be matched via the thumbprint
, email
, token-role
or token-group
properties. A single value or a list of values can be specified.
The property thumbprint
can be used to specify one or multiple SHA-512 certificate thumbprints. Roles from this rule are applied to the authenticating user if the certificate matches one of the specified thumbprints.
Using the property email
users can be matched against e-mail addresses specified in X.509 client certificates and in OpenID Connect access tokens. Values will be matched against e-mail addresses specified in the subject DN (via PKCS#9 extension 1.2.840.113549.1.9.1) and RFC-822 Name entries of the Subject Alternative Name field. If the user authenticates via OpenID Connect, the email
claim from the access token will be matched against the property values.
With the properties token-role
and token-group
role and group names can be specified to match against role and group claims within OAuth 2.0 access tokens.
Two types of roles can be applied to matched users.
DSF roles specified via the dsf-role
property define general access to the REST API and user interface. Allowed values are:
CREATE
, READ
, UPDATE
, DELETE
, SEARCH
, HISTORY
, PERMANENT_DELETE
and WEBSOCKET
.
In order to allow users to start processes, the property practitioner-role
can be used to assign codes from FHIR CodeSystem resources. Codes are specified in the form system-url|code
.
If the user has a code specified here that matches a requester
extension within the process plugin's ActivityDefinition resource, the user can start the process if he also has the dsf-role
CREATE
.
Process plugins can define and use their own code-systems. However, the DSF specifies a standard set of practitioner roles within the CodeSystem http://dsf.dev/fhir/CodeSystem/practitioner-role
:
UAC_USER
, COS_USER
, CRR_USER
, DIC_USER
, DMS_USER
, DTS_USER
, HRP_USER
, TTP_USER
, AMS_USER
and DSF_ADMIN
.
The first example defines a group of DSF administrators. Two client certificates match against this role:
DEV_DSF_FHIR_SERVER_ROLECONFIG: |
+ - certificate-admins:
+ thumbprint:
+ - afb68b1d9d47e691b8b3d50fd9848467cada8b1c76f5f4b45f00c9f8432d505361a3ee27805f4aa06799d9ac8dace94b3f1942fce44d84866961259b13be825d
+ - 2441bfddcad97eeb83c8c31fe181b90652787b8b59bf4e569219da7db4429e389479cb7c4a2f311e34217357d594ecad7d58ccfeef2a9e93c6fcf8d98897d88c
+ dsf-role:
+ - CREATE
+ - READ
+ - UPDATE
+ - DELETE
+ - SEARCH
+ - HISTORY
+ practitioner-role:
+ - http://dsf.dev/fhir/CodeSystem/practitioner-role|DSF_ADMIN
The second example defines a group of DSF administrators by specifying an admin
role that gets matched against OAuth 2.0 access tokens:
DEV_DSF_FHIR_SERVER_ROLECONFIG: |
+ - token-role-admins:
+ token-role: admin
+ dsf-role:
+ - CREATE
+ - READ
+ - UPDATE
+ - DELETE
+ - SEARCH
+ - HISTORY
+ practitioner-role:
+ - http://dsf.dev/fhir/CodeSystem/practitioner-role|DSF_ADMIN
The third example allows read-only access. Two e-mail addresses are used to match this role. E-mail addresses from X.509 client certificates and OAuth 2.0 access tokens are matched:
DEV_DSF_FHIR_SERVER_ROLECONFIG: |
+ - read-only:
+ email:
+ - first.user@test.org
+ - second.user@test.org
+ dsf-role:
+ - READ
+ - SEARCH
+ - HISTORY
The DSF FHIR server implements a subset of the FHIR R4 REST API. When accessing the API with a web browser a limited graphical user interface is shown. Without any additional configuration the API and user interface is only accessible with the X.509 client certificate configured for the organization via the configuration parameter: DEV_DSF_FHIR_SERVER_ORGANIZATION_THUMBPRINT
"}');export{h as comp,o as data}; diff --git a/assets/access-control.html-CRP3chBx.js b/assets/access-control.html-CRP3chBx.js new file mode 100644 index 000000000..821df584f --- /dev/null +++ b/assets/access-control.html-CRP3chBx.js @@ -0,0 +1,21 @@ +import{_ as i}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as s,a,o as n}from"./app-BIWb5uIp.js";const t={};function l(r,e){return n(),s("div",null,e[0]||(e[0]=[a(`The DSF BPE server provides a user interface for administrators. Without any additional configuration the user interface is not accessible with the organizations X.509 client certificate or any other certificate or OpenID Connect authenticated user.
OpenID Connect
To enable OpenID Connect authentication of local users, see the DSF BPE server OpenID Connect configuration page.
Access to the user interface can be enabled for client certificates and local users authenticating via OAuth 2.0 OpenID Connect. Access can be configured for so called roles, with all roles specified using the configuration parameter DEV_DSF_BPE_SERVER_ROLECONFIG. The value for this environment variable is specified as YAML using the block scalar |
.
The listing below shows a minimal configuration to enable access for a specific client-certificate:
DEV_DSF_BPE_SERVER_ROLECONFIG: |
+ - example_read_only_role:
+ thumbprint: 00474993fa261b0225f93c5a66aa6fcc... [a-f0-9]{128}
+ dsf-role:
+ - ADMIN
The list of user roles above contains a single rule-entry example_read_only_role
, matching the user via a client certificate SHA-512 thumbprint and assigning a single DSF role. Any string can be used as the name for the rule-entry.
Certificate Thumbprints
SHA-512 certificate thumbprints in HEX form [a-f0-9]{128}
can be calculated using:
certtool --fingerprint --hash=sha512 --infile=certificate.pem
Multiple user roles can be specified and all matching roles will be applied to an authenticated user. Use an empty string ""
or a single block scalar |
character as the value for the configuration parameter DEV_DSF_BPE_SERVER_ROLECONFIG if no roles should be configured.
To apply roles, users can be matched via the thumbprint
, email
, token-role
or token-group
properties. A single value or a list of values can be specified.
The property thumbprint
can be used to specify one or multiple SHA-512 certificate thumbprints. Roles from this rule are applied to the authenticating user if the certificate matches one of the specified thumbprints.
Using the property email
users can be matched against e-mail addresses specified in X.509 client certificates and in OpenID Connect access tokens. Values will be matched against e-mail addresses specified in the subject DN (via PKCS#9 extension 1.2.840.113549.1.9.1) and RFC-822 Name entries of the Subject Alternative Name field. If the user authenticates via OpenID Connect, the email
claim from the access token will be matched against the property values.
With the properties token-role
and token-group
role and group names can be specified to match against role and group claims within OAuth 2.0 access tokens.
Two types of roles can be applied to matched users.
DSF roles specified via the dsf-role
property define general access to the user interface. Allowed values are:
ADMIN
.
The BPE server currently does not support any practitioner-roles.
The first example defines a group of DSF administrators. Two client certificates match against this role:
DEV_DSF_BPE_SERVER_ROLECONFIG: |
+ - certificate-admins:
+ thumbprint:
+ - afb68b1d9d47e691b8b3d50fd9848467cada8b1c76f5f4b45f00c9f8432d505361a3ee27805f4aa06799d9ac8dace94b3f1942fce44d84866961259b13be825d
+ - 2441bfddcad97eeb83c8c31fe181b90652787b8b59bf4e569219da7db4429e389479cb7c4a2f311e34217357d594ecad7d58ccfeef2a9e93c6fcf8d98897d88c
+ dsf-role:
+ - ADMIN
The second example defines a group of DSF administrators by specifying an admin
role that gets matched against OAuth 2.0 access tokens:
DEV_DSF_BPE_SERVER_ROLECONFIG: |
+ - token-role-admins:
+ token-role: admin
+ dsf-role:
+ - ADMIN
The third example allows administrator access and users e-mail addresses to match this role. E-mail addresses from X.509 client certificates and OAuth 2.0 access tokens are matched:
DEV_DSF_BPE_SERVER_ROLECONFIG: |
+ - email-admins:
+ email:
+ - first.user@test.org
+ - second.user@test.org
+ dsf-role:
+ - ADMIN
The DSF BPE server provides a user interface for administrators. Without any additional configuration the user interface is not accessible with the organization's X.509 client certificate or any other certificate or OpenID Connect authenticated user.
\\nOpenID Connect
\nTo enable OpenID Connect authentication of local users, see the DSF BPE server OpenID Connect configuration page.
\\nThe DSF FHIR server implements a subset of the FHIR R4 REST API. When accessing the API with a web browser a limited graphical user interface is shown. Without any additional configuration the API and user interface is only accessible with the X.509 client certificate configured for the organization via the configuration parameter: DEV_DSF_FHIR_SERVER_ORGANIZATION_THUMBPRINT
OpenID Connect
To enable OpenID Connect authentication of local user, see the DSF FHIR server OpenID Connect configuration page.
Access to the API and user interface can be enabled for additional client certificates and local users authenticating via OAuth 2.0 OpenID Connect. Access can be configured for so called roles, with all roles specified using the configuration parameter DEV_DSF_FHIR_SERVER_ROLECONFIG. The value for this environment variable is specified as YAML using the block scalar |
.
The listing below shows a minimal configuration to enable read access for a specific client-certificate:
DEV_DSF_FHIR_SERVER_ROLECONFIG: |
+ - example_read_only_role:
+ thumbprint: 00474993fa261b0225f93c5a66aa6fcc... [a-f0-9]{128}
+ dsf-role:
+ - READ
+ - SEARCH
+ - HISTORY
The list of user roles above contains a single entry example_read_only_role
, matching the user via a client certificate SHA-512 thumbprint and assigning three DSF roles.
Certificate Thumbprints
SHA-512 certificate thumbprints in HEX form [a-f0-9]{128}
can be calculated using:
certtool --fingerprint --hash=sha512 --infile=certificate.pem
Multiple user roles can be specified and all matching roles will be applied to an authenticated user. Use an empty string ""
or a single block scalar |
character as the value for the configuration parameter DEV_DSF_FHIR_SERVER_ROLECONFIG if no roles should be configured.
To apply roles, users can be matched via the thumbprint
, email
, token-role
or token-group
properties. A single value or a list of values can be specified.
The property thumbprint
can be used to specify one or multiple SHA-512 certificate thumbprints. Roles from this rule are applied to the authenticating user if the certificate matches one of the specified thumbprints.
Using the property email
users can be matched against e-mail addresses specified in X.509 client certificates and in OpenID Connect access tokens. Values will be matched against e-mail addresses specified in the subject DN (via PKCS#9 extension 1.2.840.113549.1.9.1) and RFC-822 Name entries of the Subject Alternative Name field. If the user authenticates via OpenID Connect, the email
claim from the access token will be matched against the property values.
With the properties token-role
and token-group
role and group names can be specified to match against role and group claims within OAuth 2.0 access tokens.
Two types of roles can be applied to matched users.
DSF roles specified via the dsf-role
property define general access to the REST API and user interface. Allowed values are:
CREATE
, READ
, UPDATE
, DELETE
, SEARCH
, HISTORY
, PERMANENT_DELETE
and WEBSOCKET
.
In order to allow users to start processes, the property practitioner-role
can be used to assign codes from FHIR CodeSystem resources. Codes are specified in the form system-url|code
.
If the user has a code specified here that matches a requester
extension within the process plugin's ActivityDefinition resource, the user can start the process if he also has the dsf-role
CREATE
.
Process plugins can define and use their own code-systems. However, the DSF specifies a standard set of practitioner roles within the CodeSystem http://dsf.dev/fhir/CodeSystem/practitioner-role
:
UAC_USER
, COS_USER
, CRR_USER
, DIC_USER
, DMS_USER
, DTS_USER
, HRP_USER
, TTP_USER
, AMS_USER
and DSF_ADMIN
.
The first example defines a group of DSF administrators. Two client certificates match against this role:
DEV_DSF_FHIR_SERVER_ROLECONFIG: |
+ - certificate-admins:
+ thumbprint:
+ - afb68b1d9d47e691b8b3d50fd9848467cada8b1c76f5f4b45f00c9f8432d505361a3ee27805f4aa06799d9ac8dace94b3f1942fce44d84866961259b13be825d
+ - 2441bfddcad97eeb83c8c31fe181b90652787b8b59bf4e569219da7db4429e389479cb7c4a2f311e34217357d594ecad7d58ccfeef2a9e93c6fcf8d98897d88c
+ dsf-role:
+ - CREATE
+ - READ
+ - UPDATE
+ - DELETE
+ - SEARCH
+ - HISTORY
+ practitioner-role:
+ - http://dsf.dev/fhir/CodeSystem/practitioner-role|DSF_ADMIN
The second example defines a group of DSF administrators by specifying an admin
role that gets matched against OAuth 2.0 access tokens:
DEV_DSF_FHIR_SERVER_ROLECONFIG: |
+ - token-role-admins:
+ token-role: admin
+ dsf-role:
+ - CREATE
+ - READ
+ - UPDATE
+ - DELETE
+ - SEARCH
+ - HISTORY
+ practitioner-role:
+ - http://dsf.dev/fhir/CodeSystem/practitioner-role|DSF_ADMIN
The third example allows read-only access. Two e-mail addresses are used to match this role. E-mail addresses from X.509 client certificates and OAuth 2.0 access tokens are matched:
DEV_DSF_FHIR_SERVER_ROLECONFIG: |
+ - read-only:
+ email:
+ - first.user@test.org
+ - second.user@test.org
+ dsf-role:
+ - READ
+ - SEARCH
+ - HISTORY
The DSF FHIR server implements a subset of the FHIR R4 REST API. When accessing the API with a web browser a limited graphical user interface is shown. Without any additional configuration the API and user interface is only accessible with the X.509 client certificate configured for the organization via the configuration parameter: DEV_DSF_FHIR_SERVER_ORGANIZATION_THUMBPRINT
"}');export{h as comp,o as data}; diff --git a/assets/access-control.html-ChATt82A.js b/assets/access-control.html-ChATt82A.js new file mode 100644 index 000000000..1720f56cf --- /dev/null +++ b/assets/access-control.html-ChATt82A.js @@ -0,0 +1,39 @@ +import{_ as i}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as s,a,o as n}from"./app-BIWb5uIp.js";const t={};function l(r,e){return n(),s("div",null,e[0]||(e[0]=[a(`The DSF FHIR server implements a subset of the FHIR R4 REST API. When accessing the API with a web browser a limited graphical user interface is shown. Without any additional configuration the API and user interface is only accessible with the X.509 client certificate configured for the organization via the configuration parameter: DEV_DSF_FHIR_SERVER_ORGANIZATION_THUMBPRINT
OpenID Connect
To enable OpenID Connect authentication of local users, see the DSF FHIR server OpenID Connect configuration page.
Access to the API and user interface can be enabled for additional client certificates and local users authenticating via OAuth 2.0 OpenID Connect. Access can be configured for so called roles, with all roles specified using the configuration parameter DEV_DSF_FHIR_SERVER_ROLECONFIG. The value for this environment variable is specified as YAML using the block scalar |
.
The listing below shows a minimal configuration to enable read access for a specific client-certificate:
DEV_DSF_FHIR_SERVER_ROLECONFIG: |
+ - example_read_only_role:
+ thumbprint: 00474993fa261b0225f93c5a66aa6fcc... [a-f0-9]{128}
+ dsf-role:
+ - READ
+ - SEARCH
+ - HISTORY
The list of user roles above contains a single entry example_read_only_role
, matching the user via a client certificate SHA-512 thumbprint and assigning three DSF roles.
Certificate Thumbprints
SHA-512 certificate thumbprints in HEX form [a-f0-9]{128}
can be calculated using:
certtool --fingerprint --hash=sha512 --infile=certificate.pem
Multiple user roles can be specified and all matching roles will be applied to an authenticated user. Use an empty string ""
or a single block scalar |
character as the value for the configuration parameter DEV_DSF_FHIR_SERVER_ROLECONFIG if no roles should be configured.
To apply roles, users can be matched via the thumbprint
, email
, token-role
or token-group
properties. A single value or a list of values can be specified.
The property thumbprint
can be used to specify one or multiple SHA-512 certificate thumbprints. Roles from this rule are applied to the authenticating user if the certificate matches one of the specified thumbprints.
Using the property email
users can be matched against e-mail addresses specified in X.509 client certificates and in OpenID Connect access tokens. Values will be matched against e-mail addresses specified in the subject DN (via PKCS#9 extension 1.2.840.113549.1.9.1) and RFC-822 Name entries of the Subject Alternative Name field. If the user authenticates via OpenID Connect, the email
claim from the access token will be matched against the property values.
With the properties token-role
and token-group
role and group names can be specified to match against role and group claims within OAuth 2.0 access tokens.
Two types of roles can be applied to matched users.
DSF roles specified via the dsf-role
property define general access to the REST API and user interface. Allowed values are:
CREATE
, READ
, UPDATE
, DELETE
, SEARCH
, HISTORY
, PERMANENT_DELETE
and WEBSOCKET
.
In order to allow users to start processes, the property practitioner-role
can be used to assign codes from FHIR CodeSystem resources. Codes are specified in the form system-url|code
.
If the user has a code specified here that matches a requester
extension within the process plugin's ActivityDefinition resource, the user can start the process if he also has the dsf-role
CREATE
.
Process plugins can define and use their own code-systems. However, the DSF specifies a standard set of practitioner roles within the CodeSystem http://dsf.dev/fhir/CodeSystem/practitioner-role
:
UAC_USER
, COS_USER
, CRR_USER
, DIC_USER
, DMS_USER
, DTS_USER
, HRP_USER
, TTP_USER
, AMS_USER
and DSF_ADMIN
.
The first example defines a group of DSF administrators. Two client certificates match against this role:
DEV_DSF_FHIR_SERVER_ROLECONFIG: |
+ - certificate-admins:
+ thumbprint:
+ - afb68b1d9d47e691b8b3d50fd9848467cada8b1c76f5f4b45f00c9f8432d505361a3ee27805f4aa06799d9ac8dace94b3f1942fce44d84866961259b13be825d
+ - 2441bfddcad97eeb83c8c31fe181b90652787b8b59bf4e569219da7db4429e389479cb7c4a2f311e34217357d594ecad7d58ccfeef2a9e93c6fcf8d98897d88c
+ dsf-role:
+ - CREATE
+ - READ
+ - UPDATE
+ - DELETE
+ - SEARCH
+ - HISTORY
+ practitioner-role:
+ - http://dsf.dev/fhir/CodeSystem/practitioner-role|DSF_ADMIN
The second example defines a group of DSF administrators by specifying an admin
role that gets matched against OAuth 2.0 access tokens:
DEV_DSF_FHIR_SERVER_ROLECONFIG: |
+ - token-role-admins:
+ token-role: admin
+ dsf-role:
+ - CREATE
+ - READ
+ - UPDATE
+ - DELETE
+ - SEARCH
+ - HISTORY
+ practitioner-role:
+ - http://dsf.dev/fhir/CodeSystem/practitioner-role|DSF_ADMIN
The third example allows read-only access. Two e-mail addresses are used to match this role. E-mail addresses from X.509 client certificates and OAuth 2.0 access tokens are matched:
DEV_DSF_FHIR_SERVER_ROLECONFIG: |
+ - read-only:
+ email:
+ - first.user@test.org
+ - second.user@test.org
+ dsf-role:
+ - READ
+ - SEARCH
+ - HISTORY
The DSF FHIR server implements a subset of the FHIR R4 REST API. When accessing the API with a web browser a limited graphical user interface is shown. Without any additional configuration the API and user interface is only accessible with the X.509 client certificate configured for the organization via the configuration parameter: DEV_DSF_FHIR_SERVER_ORGANIZATION_THUMBPRINT
"}');export{h as comp,o as data}; diff --git a/assets/access-control.html-CqlKT6aL.js b/assets/access-control.html-CqlKT6aL.js new file mode 100644 index 000000000..504300b1c --- /dev/null +++ b/assets/access-control.html-CqlKT6aL.js @@ -0,0 +1,39 @@ +import{_ as i}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as s,a,o as n}from"./app-BIWb5uIp.js";const t={};function l(r,e){return n(),s("div",null,e[0]||(e[0]=[a(`The DSF FHIR server implements a subset of the FHIR R4 REST API. When accessing the API with a web browser a limited graphical user interface is shown. Without any additional configuration the API and user interface is only accessible with the X.509 client certificate configured for the organization via the configuration parameter: DEV_DSF_FHIR_SERVER_ORGANIZATION_THUMBPRINT
OpenID Connect
To enable OpenID Connect authentication of local users, see the DSF FHIR server OpenID Connect configuration page.
Access to the API and user interface can be enabled for additional client certificates and local users authenticating via OAuth 2.0 OpenID Connect. Access can be configured for so called roles, with all roles specified using the configuration parameter DEV_DSF_FHIR_SERVER_ROLECONFIG. The value for this environment variable is specified as YAML using the block scalar |
.
The listing below shows a minimal configuration to enable read access for a specific client-certificate:
DEV_DSF_FHIR_SERVER_ROLECONFIG: |
+ - example_read_only_role:
+ thumbprint: 00474993fa261b0225f93c5a66aa6fcc... [a-f0-9]{128}
+ dsf-role:
+ - READ
+ - SEARCH
+ - HISTORY
The list of user roles above contains a single entry example_read_only_role
, matching the user via a client certificate SHA-512 thumbprint and assigning three DSF roles.
Certificate Thumbprints
SHA-512 certificate thumbprints in HEX form [a-f0-9]{128}
can be calculated using:
certtool --fingerprint --hash=sha512 --infile=certificate.pem
Multiple user roles can be specified and all matching roles will be applied to an authenticated user. Use an empty string ""
or a single block scalar |
character as the value for the configuration parameter DEV_DSF_FHIR_SERVER_ROLECONFIG if no roles should be configured.
To apply roles, users can be matched via the thumbprint
, email
, token-role
or token-group
properties. A single value or a list of values can be specified.
The property thumbprint
can be used to specify one or multiple SHA-512 certificate thumbprints. Roles from this rule are applied to the authenticating user if the certificate matches one of the specified thumbprints.
Using the property email
users can be matched against e-mail addresses specified in X.509 client certificates and in OpenID Connect access tokens. Values will be matched against e-mail addresses specified in the subject DN (via PKCS#9 extension 1.2.840.113549.1.9.1) and RFC-822 Name entries of the Subject Alternative Name field. If the user authenticates via OpenID Connect, the email
claim from the access token will be matched against the property values.
With the properties token-role
and token-group
role and group names can be specified to match against role and group claims within OAuth 2.0 access tokens.
Two types of roles can be applied to matched users.
DSF roles specified via the dsf-role
property define general access to the REST API and user interface. Allowed values are:
CREATE
, READ
, UPDATE
, DELETE
, SEARCH
, HISTORY
, PERMANENT_DELETE
and WEBSOCKET
.
In order to allow users to start processes, the property practitioner-role
can be used to assign codes from FHIR CodeSystem resources. Codes are specified in the form system-url|code
.
If the user has a code specified here that matches a requester
extension within the process plugin's ActivityDefinition resource, the user can start the process if he also has the dsf-role
CREATE
.
Process plugins can define and use their own code-systems. However, the DSF specifies a standard set of practitioner roles within the CodeSystem http://dsf.dev/fhir/CodeSystem/practitioner-role
:
UAC_USER
, COS_USER
, CRR_USER
, DIC_USER
, DMS_USER
, DTS_USER
, HRP_USER
, TTP_USER
, AMS_USER
and DSF_ADMIN
.
The first example defines a group of DSF administrators. Two client certificates match against this role:
DEV_DSF_FHIR_SERVER_ROLECONFIG: |
+ - certificate-admins:
+ thumbprint:
+ - afb68b1d9d47e691b8b3d50fd9848467cada8b1c76f5f4b45f00c9f8432d505361a3ee27805f4aa06799d9ac8dace94b3f1942fce44d84866961259b13be825d
+ - 2441bfddcad97eeb83c8c31fe181b90652787b8b59bf4e569219da7db4429e389479cb7c4a2f311e34217357d594ecad7d58ccfeef2a9e93c6fcf8d98897d88c
+ dsf-role:
+ - CREATE
+ - READ
+ - UPDATE
+ - DELETE
+ - SEARCH
+ - HISTORY
+ practitioner-role:
+ - http://dsf.dev/fhir/CodeSystem/practitioner-role|DSF_ADMIN
The second example defines a group of DSF administrators by specifying an admin
role that gets matched against OAuth 2.0 access tokens:
DEV_DSF_FHIR_SERVER_ROLECONFIG: |
+ - token-role-admins:
+ token-role: admin
+ dsf-role:
+ - CREATE
+ - READ
+ - UPDATE
+ - DELETE
+ - SEARCH
+ - HISTORY
+ practitioner-role:
+ - http://dsf.dev/fhir/CodeSystem/practitioner-role|DSF_ADMIN
The third example allows read-only access. Two e-mail addresses are used to match this role. E-mail addresses from X.509 client certificates and OAuth 2.0 access tokens are matched:
DEV_DSF_FHIR_SERVER_ROLECONFIG: |
+ - read-only:
+ email:
+ - first.user@test.org
+ - second.user@test.org
+ dsf-role:
+ - READ
+ - SEARCH
+ - HISTORY
The DSF FHIR server implements a subset of the FHIR R4 REST API. When accessing the API with a web browser a limited graphical user interface is shown. Without any additional configuration the API and user interface is only accessible with the X.509 client certificate configured for the organization via the configuration parameter: DEV_DSF_FHIR_SERVER_ORGANIZATION_THUMBPRINT
"}');export{h as comp,o as data}; diff --git a/assets/access-control.html-DCPRLpIt.js b/assets/access-control.html-DCPRLpIt.js new file mode 100644 index 000000000..7620936d3 --- /dev/null +++ b/assets/access-control.html-DCPRLpIt.js @@ -0,0 +1,39 @@ +import{_ as i}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as s,a,o as n}from"./app-BIWb5uIp.js";const t={};function l(r,e){return n(),s("div",null,e[0]||(e[0]=[a(`The DSF FHIR server implements a subset of the FHIR R4 REST API. When accessing the API with a web browser a limited graphical user interface is shown. Without any additional configuration the API and user interface is only accessible with the X.509 client certificate configured for the organization via the configuration parameter: DEV_DSF_FHIR_SERVER_ORGANIZATION_THUMBPRINT
OpenID Connect
To enable OpenID Connect authentication of local users, see the DSF FHIR server OpenID Connect configuration page.
Access to the API and user interface can be enabled for additional client certificates and local users authenticating via OAuth 2.0 OpenID Connect. Access can be configured for so called roles, with all roles specified using the configuration parameter DEV_DSF_FHIR_SERVER_ROLECONFIG. The value for this environment variable is specified as YAML using the block scalar |
.
The listing below shows a minimal configuration to enable read access for a specific client-certificate:
DEV_DSF_FHIR_SERVER_ROLECONFIG: |
+ - example_read_only_role:
+ thumbprint: 00474993fa261b0225f93c5a66aa6fcc... [a-f0-9]{128}
+ dsf-role:
+ - READ
+ - SEARCH
+ - HISTORY
The list of user roles above contains a single rule-entry example_read_only_role
, matching the user via a client certificate SHA-512 thumbprint and assigning three DSF roles. Any string can be used as the name for the rule-entry.
Certificate Thumbprints
SHA-512 certificate thumbprints in HEX form [a-f0-9]{128}
can be calculated using:
certtool --fingerprint --hash=sha512 --infile=certificate.pem
Multiple user roles can be specified and all matching roles will be applied to an authenticated user. Use an empty string ""
or a single block scalar |
character as the value for the configuration parameter DEV_DSF_FHIR_SERVER_ROLECONFIG if no roles should be configured.
To apply roles, users can be matched via the thumbprint
, email
, token-role
or token-group
properties. A single value or a list of values can be specified.
The property thumbprint
can be used to specify one or multiple SHA-512 certificate thumbprints. Roles from this rule are applied to the authenticating user if the certificate matches one of the specified thumbprints.
Using the property email
users can be matched against e-mail addresses specified in X.509 client certificates and in OpenID Connect access tokens. Values will be matched against e-mail addresses specified in the subject DN (via PKCS#9 extension 1.2.840.113549.1.9.1) and RFC-822 Name entries of the Subject Alternative Name field. If the user authenticates via OpenID Connect, the email
claim from the access token will be matched against the property values.
With the properties token-role
and token-group
role and group names can be specified to match against role and group claims within OAuth 2.0 access tokens.
Two types of roles can be applied to matched users.
DSF roles specified via the dsf-role
property define general access to the REST API and user interface. Allowed values are:
CREATE
, READ
, UPDATE
, DELETE
, SEARCH
, HISTORY
, PERMANENT_DELETE
and WEBSOCKET
.
In order to allow users to start processes, the property practitioner-role
can be used to assign codes from FHIR CodeSystem resources. Codes are specified in the form system-url|code
.
If the user has a code specified here that matches a requester
extension within the process plugin's ActivityDefinition resource, the user can start the process if they also have the dsf-role
CREATE
.
Process plugins can define and use their own code-systems. However, the DSF specifies a standard set of practitioner roles within the CodeSystem http://dsf.dev/fhir/CodeSystem/practitioner-role
:
UAC_USER
, COS_USER
, CRR_USER
, DIC_USER
, DMS_USER
, DTS_USER
, HRP_USER
, TTP_USER
, AMS_USER
and DSF_ADMIN
.
The first example defines a group of DSF administrators. Two client certificates match against this role:
DEV_DSF_FHIR_SERVER_ROLECONFIG: |
+ - certificate-admins:
+ thumbprint:
+ - afb68b1d9d47e691b8b3d50fd9848467cada8b1c76f5f4b45f00c9f8432d505361a3ee27805f4aa06799d9ac8dace94b3f1942fce44d84866961259b13be825d
+ - 2441bfddcad97eeb83c8c31fe181b90652787b8b59bf4e569219da7db4429e389479cb7c4a2f311e34217357d594ecad7d58ccfeef2a9e93c6fcf8d98897d88c
+ dsf-role:
+ - CREATE
+ - READ
+ - UPDATE
+ - DELETE
+ - SEARCH
+ - HISTORY
+ practitioner-role:
+ - http://dsf.dev/fhir/CodeSystem/practitioner-role|DSF_ADMIN
The second example defines a group of DSF administrators by specifying an admin
role that gets matched against OAuth 2.0 access tokens:
DEV_DSF_FHIR_SERVER_ROLECONFIG: |
+ - token-role-admins:
+ token-role: admin
+ dsf-role:
+ - CREATE
+ - READ
+ - UPDATE
+ - DELETE
+ - SEARCH
+ - HISTORY
+ practitioner-role:
+ - http://dsf.dev/fhir/CodeSystem/practitioner-role|DSF_ADMIN
The third example allows read-only access. Two e-mail addresses are used to match this role. E-mail addresses from X.509 client certificates and OAuth 2.0 access tokens are matched:
DEV_DSF_FHIR_SERVER_ROLECONFIG: |
+ - read-only:
+ email:
+ - first.user@test.org
+ - second.user@test.org
+ dsf-role:
+ - READ
+ - SEARCH
+ - HISTORY
The DSF FHIR server implements a subset of the FHIR R4 REST API. When accessing the API with a web browser a limited graphical user interface is shown. Without any additional configuration the API and user interface is only accessible with the X.509 client certificate configured for the organization via the configuration parameter: DEV_DSF_FHIR_SERVER_ORGANIZATION_THUMBPRINT
"}');export{h as comp,o as data}; diff --git a/assets/access-control.html-DgJ2jQps.js b/assets/access-control.html-DgJ2jQps.js new file mode 100644 index 000000000..ebf0d3092 --- /dev/null +++ b/assets/access-control.html-DgJ2jQps.js @@ -0,0 +1,21 @@ +import{_ as i}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as s,a,o as n}from"./app-BIWb5uIp.js";const t={};function l(r,e){return n(),s("div",null,e[0]||(e[0]=[a(`The DSF BPE server provides a user interface for administrators. Without any additional configuration the user interface is not accessible with the organizations X.509 client certificate or any other certificate or OpenID Connect authenticated user.
OpenID Connect
To enable OpenID Connect authentication of local user, see the DSF BPE server OpenID Connect configuration page.
Access to the user interface can be enabled for client certificates and local users authenticating via OAuth 2.0 OpenID Connect. Access can be configured for so called roles, with all roles specified using the configuration parameter DEV_DSF_BPE_SERVER_ROLECONFIG. The value for this environment variable is specified as YAML using the block scalar |
.
The listing below shows a minimal configuration to enable access for a specific client-certificate:
DEV_DSF_BPE_SERVER_ROLECONFIG: |
+ - example_read_only_role:
+ thumbprint: 00474993fa261b0225f93c5a66aa6fcc... [a-f0-9]{128}
+ dsf-role:
+ - ADMIN
The list of user roles above contains a single rule-entry example_read_only_role
, matching the user via a client certificate SHA-512 thumbprint and assigning a single DSF role. Any string can be used as the name for the rule-entry.
Certificate Thumbprints
SHA-512 certificate thumbprints in HEX form [a-f0-9]{128}
can be calculated using:
certtool --fingerprint --hash=sha512 --infile=certificate.pem
Multiple user roles can be specified and all matching roles will be applied to an authenticated user. Use an empty string ""
or a single block scalar |
character as the value for the configuration parameter DEV_DSF_BPE_SERVER_ROLECONFIG if no roles should be configured.
To apply roles, users can be matched via the thumbprint
, email
, token-role
or token-group
properties. A single value or a list of values can be specified.
The property thumbprint
can be used to specify one or multiple SHA-512 certificate thumbprints. Roles from this rule are applied to the authenticating user if the certificate matches one of the specified thumbprints.
Using the property email
users can be matched against e-mail addresses specified in X.509 client certificates and in OpenID Connect access tokens. Values will be matched against e-mail addresses specified in the subject DN (via PKCS#9 extension 1.2.840.113549.1.9.1) and RFC-822 Name entries of the Subject Alternative Name field. If the user authenticates via OpenID Connect, the email
claim from the access token will be matched against the property values.
With the properties token-role
and token-group
role and group names can be specified to match against role and group claims within OAuth 2.0 access tokens.
Two types of roles can be applied to matched users.
DSF roles specified via the dsf-role
property define general access to the user interface. Allowed values are:
ADMIN
.
The BPE server currently does not support any practitioner-roles.
The first example defines a group of DSF administrators. Two client certificates match against this role:
DEV_DSF_BPE_SERVER_ROLECONFIG: |
+ - certificate-admins:
+ thumbprint:
+ - afb68b1d9d47e691b8b3d50fd9848467cada8b1c76f5f4b45f00c9f8432d505361a3ee27805f4aa06799d9ac8dace94b3f1942fce44d84866961259b13be825d
+ - 2441bfddcad97eeb83c8c31fe181b90652787b8b59bf4e569219da7db4429e389479cb7c4a2f311e34217357d594ecad7d58ccfeef2a9e93c6fcf8d98897d88c
+ dsf-role:
+ - ADMIN
The second example defines a group of DSF administrators by specifying an admin
role that gets matched against OAuth 2.0 access tokens:
DEV_DSF_BPE_SERVER_ROLECONFIG: |
+ - token-role-admins:
+ token-role: admin
+ dsf-role:
+ - ADMIN
The third example allows administrator access and users e-mail addresses to match this role. E-mail addresses from X.509 client certificates and OAuth 2.0 access tokens are matched:
DEV_DSF_BPE_SERVER_ROLECONFIG: |
+ - email-admins:
+ email:
+ - first.user@test.org
+ - second.user@test.org
+ dsf-role:
+ - ADMIN
The DSF BPE server provides a user interface for administrators. Without any additional configuration the user interface is not accessible with the organizations X.509 client certificate or any other certificate or OpenID Connect authenticated user.
\\nOpenID Connect
\\nTo enable OpenID Connect authentication of local user, see the DSF BPE server OpenID Connect configuration page.
\\nThe DSF FHIR server implements a subset of the FHIR R4 REST API. When accessing the API with a web browser a limited graphical user interface is shown. Without any additional configuration the API and user interface is only accessible with the X.509 client certificate configured for the organization via the configuration parameter: DEV_DSF_FHIR_SERVER_ORGANIZATION_THUMBPRINT
OpenID Connect
To enable OpenID Connect authentication of local user, see the DSF FHIR server OpenID Connect configuration page.
Access to the API and user interface can be enabled for additional client certificates and local users authenticating via OAuth 2.0 OpenID Connect. Access can be configured for so called roles, with all roles specified using the configuration parameter DEV_DSF_FHIR_SERVER_ROLECONFIG. The value for this environment variable is specified as YAML using the block scalar |
.
The listing below shows a minimal configuration to enable read access for a specific client-certificate:
DEV_DSF_FHIR_SERVER_ROLECONFIG: |
+ - example_read_only_role:
+ thumbprint: 00474993fa261b0225f93c5a66aa6fcc... [a-f0-9]{128}
+ dsf-role:
+ - READ
+ - SEARCH
+ - HISTORY
The list of user roles above contains a single rule-entry example_read_only_role
, matching the user via a client certificate SHA-512 thumbprint and assigning three DSF roles. Any string can be used as the name for the rule-entry.
Certificate Thumbprints
SHA-512 certificate thumbprints in HEX form [a-f0-9]{128}
can be calculated using:
certtool --fingerprint --hash=sha512 --infile=certificate.pem
Multiple user roles can be specified and all matching roles will be applied to an authenticated user. Use an empty string ""
or a single block scalar |
character as the value for the configuration parameter DEV_DSF_FHIR_SERVER_ROLECONFIG if no roles should be configured.
To apply roles, users can be matched via the thumbprint
, email
, token-role
or token-group
properties. A single value or a list of values can be specified.
The property thumbprint
can be used to specify one or multiple SHA-512 certificate thumbprints. Roles from this rule are applied to the authenticating user if the certificate matches one of the specified thumbprints.
Using the property email
users can be matched against e-mail addresses specified in X.509 client certificates and in OpenID Connect access tokens. Values will be matched against e-mail addresses specified in the subject DN (via PKCS#9 extension 1.2.840.113549.1.9.1) and RFC-822 Name entries of the Subject Alternative Name field. If the user authenticates via OpenID Connect, the email
claim from the access token will be matched against the property values.
With the properties token-role
and token-group
role and group names can be specified to match against role and group claims within OAuth 2.0 access tokens.
Two types of roles can be applied to matched users.
DSF roles specified via the dsf-role
property define general access to the REST API and user interface. Allowed values are:
CREATE
, READ
, UPDATE
, DELETE
, SEARCH
, HISTORY
, PERMANENT_DELETE
and WEBSOCKET
.
In order to allow users to start processes, the property practitioner-role
can be used to assign codes from FHIR CodeSystem resources. Codes are specified in the form system-url|code
.
If the user has a code specified here that matches a requester
extension within the process plugin's ActivityDefinition resource, the user can start the process if they also have the dsf-role
CREATE
.
Process plugins can define and use their own code-systems. However, the DSF specifies a standard set of practitioner roles within the CodeSystem http://dsf.dev/fhir/CodeSystem/practitioner-role
:
UAC_USER
, COS_USER
, CRR_USER
, DIC_USER
, DMS_USER
, DTS_USER
, HRP_USER
, TTP_USER
, AMS_USER
and DSF_ADMIN
.
The first example defines a group of DSF administrators. Two client certificates match against this role:
DEV_DSF_FHIR_SERVER_ROLECONFIG: |
+ - certificate-admins:
+ thumbprint:
+ - afb68b1d9d47e691b8b3d50fd9848467cada8b1c76f5f4b45f00c9f8432d505361a3ee27805f4aa06799d9ac8dace94b3f1942fce44d84866961259b13be825d
+ - 2441bfddcad97eeb83c8c31fe181b90652787b8b59bf4e569219da7db4429e389479cb7c4a2f311e34217357d594ecad7d58ccfeef2a9e93c6fcf8d98897d88c
+ dsf-role:
+ - CREATE
+ - READ
+ - UPDATE
+ - DELETE
+ - SEARCH
+ - HISTORY
+ practitioner-role:
+ - http://dsf.dev/fhir/CodeSystem/practitioner-role|DSF_ADMIN
The second example defines a group of DSF administrators by specifying an admin
role that gets matched against OAuth 2.0 access tokens:
DEV_DSF_FHIR_SERVER_ROLECONFIG: |
+ - token-role-admins:
+ token-role: admin
+ dsf-role:
+ - CREATE
+ - READ
+ - UPDATE
+ - DELETE
+ - SEARCH
+ - HISTORY
+ practitioner-role:
+ - http://dsf.dev/fhir/CodeSystem/practitioner-role|DSF_ADMIN
The third example allows read-only access. Two e-mail addresses are used to match this role. E-mail addresses from X.509 client certificates and OAuth 2.0 access tokens are matched:
DEV_DSF_FHIR_SERVER_ROLECONFIG: |
+ - read-only:
+ email:
+ - first.user@test.org
+ - second.user@test.org
+ dsf-role:
+ - READ
+ - SEARCH
+ - HISTORY
The DSF FHIR server implements a subset of the FHIR R4 REST API. When accessing the API with a web browser a limited graphical user interface is shown. Without any additional configuration the API and user interface is only accessible with the X.509 client certificate configured for the organization via the configuration parameter: DEV_DSF_FHIR_SERVER_ORGANIZATION_THUMBPRINT
"}');export{h as comp,o as data}; diff --git a/assets/access-control.html-DwvV-1yc.js b/assets/access-control.html-DwvV-1yc.js new file mode 100644 index 000000000..e87cc5c0e --- /dev/null +++ b/assets/access-control.html-DwvV-1yc.js @@ -0,0 +1,21 @@ +import{_ as i}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as s,a,o as n}from"./app-BIWb5uIp.js";const t={};function l(r,e){return n(),s("div",null,e[0]||(e[0]=[a(`The DSF BPE server provides a user interface for administrators. Without any additional configuration the user interface is not accessible with the organizations X.509 client certificate or any other certificate or OpenID Connect authenticated user.
OpenID Connect
To enable OpenID Connect authentication of local user, see the DSF BPE server OpenID Connect configuration page.
Access to the user interface can be enabled for client certificates and local users authenticating via OAuth 2.0 OpenID Connect. Access can be configured for so called roles, with all roles specified using the configuration parameter DEV_DSF_BPE_SERVER_ROLECONFIG. The value for this environment variable is specified as YAML using the block scalar |
.
The listing below shows a minimal configuration to enable access for a specific client-certificate:
DEV_DSF_BPE_SERVER_ROLECONFIG: |
+ - example_read_only_role:
+ thumbprint: 00474993fa261b0225f93c5a66aa6fcc... [a-f0-9]{128}
+ dsf-role:
+ - ADMIN
The list of user roles above contains a single rule-entry example_read_only_role
, matching the user via a client certificate SHA-512 thumbprint and assigning a single DSF role. Any string can be used as the name for the rule-entry.
Certificate Thumbprints
SHA-512 certificate thumbprints in HEX form [a-f0-9]{128}
can be calculated using:
certtool --fingerprint --hash=sha512 --infile=certificate.pem
Multiple user roles can be specified and all matching roles will be applied to an authenticated user. Use an empty string ""
or a single block scalar |
character as the value for the configuration parameter DEV_DSF_BPE_SERVER_ROLECONFIG if no roles should be configured.
To apply roles, users can be matched via the thumbprint
, email
, token-role
or token-group
properties. A single value or a list of values can be specified.
The property thumbprint
can be used to specify one or multiple SHA-512 certificate thumbprints. Roles from this rule are applied to the authenticating user if the certificate matches one of the specified thumbprints.
Using the property email
users can be matched against e-mail addresses specified in X.509 client certificates and in OpenID Connect access tokens. Values will be matched against e-mail addresses specified in the subject DN (via PKCS#9 extension 1.2.840.113549.1.9.1) and RFC-822 Name entries of the Subject Alternative Name field. If the user authenticates via OpenID Connect, the email
claim from the access token will be matched against the property values.
With the properties token-role
and token-group
role and group names can be specified to match against role and group claims within OAuth 2.0 access tokens.
Two types of roles can be applied to matched users.
DSF roles specified via the dsf-role
property define general access to the user interface. Allowed values are:
ADMIN
.
The BPE server currently does not support any practitioner-roles.
The first example defines a group of DSF administrators. Two client certificates match against this role:
DEV_DSF_BPE_SERVER_ROLECONFIG: |
+ - certificate-admins:
+ thumbprint:
+ - afb68b1d9d47e691b8b3d50fd9848467cada8b1c76f5f4b45f00c9f8432d505361a3ee27805f4aa06799d9ac8dace94b3f1942fce44d84866961259b13be825d
+ - 2441bfddcad97eeb83c8c31fe181b90652787b8b59bf4e569219da7db4429e389479cb7c4a2f311e34217357d594ecad7d58ccfeef2a9e93c6fcf8d98897d88c
+ dsf-role:
+ - ADMIN
The second example defines a group of DSF administrators by specifying an admin
role that gets matched against OAuth 2.0 access tokens:
DEV_DSF_BPE_SERVER_ROLECONFIG: |
+ - token-role-admins:
+ token-role: admin
+ dsf-role:
+ - ADMIN
The third example allows administrator access and users e-mail addresses to match this role. E-mail addresses from X.509 client certificates and OAuth 2.0 access tokens are matched:
DEV_DSF_BPE_SERVER_ROLECONFIG: |
+ - email-admins:
+ email:
+ - first.user@test.org
+ - second.user@test.org
+ dsf-role:
+ - ADMIN
The DSF BPE server provides a user interface for administrators. Without any additional configuration the user interface is not accessible with the organizations X.509 client certificate or any other certificate or OpenID Connect authenticated user.
\\nOpenID Connect
\\nTo enable OpenID Connect authentication of local user, see the DSF BPE server OpenID Connect configuration page.
\\nThe DSF FHIR server implements a subset of the FHIR R4 REST API. When accessing the API with a web browser a limited graphical user interface is shown. Without any additional configuration the API and user interface is only accessible with the X.509 client certificate configured for the organization via the configuration parameter: DEV_DSF_FHIR_SERVER_ORGANIZATION_THUMBPRINT
OpenID Connect
To enable OpenID Connect authentication of local user, see the DSF FHIR server OpenID Connect configuration page.
Access to the API and user interface can be enabled for additional client certificates and local users authenticating via OAuth 2.0 OpenID Connect. Access can be configured for so called roles, with all roles specified using the configuration parameter DEV_DSF_FHIR_SERVER_ROLECONFIG. The value for this environment variable is specified as YAML using the block scalar |
.
The listing below shows a minimal configuration to enable read access for a specific client-certificate:
DEV_DSF_FHIR_SERVER_ROLECONFIG: |
+ - example_read_only_role:
+ thumbprint: 00474993fa261b0225f93c5a66aa6fcc... [a-f0-9]{128}
+ dsf-role:
+ - READ
+ - SEARCH
+ - HISTORY
The list of user roles above contains a single rule-entry example_read_only_role
, matching the user via a client certificate SHA-512 thumbprint and assigning three DSF roles. Any string can be used as the name for the rule-entry.
Certificate Thumbprints
SHA-512 certificate thumbprints in HEX form [a-f0-9]{128}
can be calculated using:
certtool --fingerprint --hash=sha512 --infile=certificate.pem
Multiple user roles can be specified and all matching roles will be applied to an authenticated user. Use an empty string ""
or a single block scalar |
character as the value for the configuration parameter DEV_DSF_FHIR_SERVER_ROLECONFIG if no roles should be configured.
To apply roles, users can be matched via the thumbprint
, email
, token-role
or token-group
properties. A single value or a list of values can be specified.
The property thumbprint
can be used to specify one or multiple SHA-512 certificate thumbprints. Roles from this rule are applied to the authenticating user if the certificate matches one of the specified thumbprints.
Using the property email
users can be matched against e-mail addresses specified in X.509 client certificates and in OpenID Connect access tokens. Values will be matched against e-mail addresses specified in the subject DN (via PKCS#9 extension 1.2.840.113549.1.9.1) and RFC-822 Name entries of the Subject Alternative Name field. If the user authenticates via OpenID Connect, the email
claim from the access token will be matched against the property values.
With the properties token-role
and token-group
role and group names can be specified to match against role and group claims within OAuth 2.0 access tokens.
Two types of roles can be applied to matched users.
DSF roles specified via the dsf-role
property define general access to the REST API and user interface. Allowed values are:
CREATE
, READ
, UPDATE
, DELETE
, SEARCH
, HISTORY
, PERMANENT_DELETE
and WEBSOCKET
.
In order to allow users to start processes, the property practitioner-role
can be used to assign codes from FHIR CodeSystem resources. Codes are specified in the form system-url|code
.
If the user has a code specified here that matches a requester
extension within the process plugin's ActivityDefinition resource, the user can start the process if they also have the dsf-role
CREATE
.
Process plugins can define and use their own code-systems. However, the DSF specifies a standard set of practitioner roles within the CodeSystem http://dsf.dev/fhir/CodeSystem/practitioner-role
:
UAC_USER
, COS_USER
, CRR_USER
, DIC_USER
, DMS_USER
, DTS_USER
, HRP_USER
, TTP_USER
, AMS_USER
and DSF_ADMIN
.
The first example defines a group of DSF administrators. Two client certificates match against this role:
DEV_DSF_FHIR_SERVER_ROLECONFIG: |
+ - certificate-admins:
+ thumbprint:
+ - afb68b1d9d47e691b8b3d50fd9848467cada8b1c76f5f4b45f00c9f8432d505361a3ee27805f4aa06799d9ac8dace94b3f1942fce44d84866961259b13be825d
+ - 2441bfddcad97eeb83c8c31fe181b90652787b8b59bf4e569219da7db4429e389479cb7c4a2f311e34217357d594ecad7d58ccfeef2a9e93c6fcf8d98897d88c
+ dsf-role:
+ - CREATE
+ - READ
+ - UPDATE
+ - DELETE
+ - SEARCH
+ - HISTORY
+ practitioner-role:
+ - http://dsf.dev/fhir/CodeSystem/practitioner-role|DSF_ADMIN
The second example defines a group of DSF administrators by specifying an admin
role that gets matched against OAuth 2.0 access tokens:
DEV_DSF_FHIR_SERVER_ROLECONFIG: |
+ - token-role-admins:
+ token-role: admin
+ dsf-role:
+ - CREATE
+ - READ
+ - UPDATE
+ - DELETE
+ - SEARCH
+ - HISTORY
+ practitioner-role:
+ - http://dsf.dev/fhir/CodeSystem/practitioner-role|DSF_ADMIN
The third example allows read-only access. Two e-mail addresses are used to match this role. E-mail addresses from X.509 client certificates and OAuth 2.0 access tokens are matched:
DEV_DSF_FHIR_SERVER_ROLECONFIG: |
+ - read-only:
+ email:
+ - first.user@test.org
+ - second.user@test.org
+ dsf-role:
+ - READ
+ - SEARCH
+ - HISTORY
The DSF FHIR server implements a subset of the FHIR R4 REST API. When accessing the API with a web browser a limited graphical user interface is shown. Without any additional configuration the API and user interface is only accessible with the X.509 client certificate configured for the organization via the configuration parameter: DEV_DSF_FHIR_SERVER_ORGANIZATION_THUMBPRINT
"}');export{h as comp,o as data}; diff --git a/assets/access-control.html-OtE7EOFQ.js b/assets/access-control.html-OtE7EOFQ.js new file mode 100644 index 000000000..2d211d970 --- /dev/null +++ b/assets/access-control.html-OtE7EOFQ.js @@ -0,0 +1,39 @@ +import{_ as s}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as i,a,o as n}from"./app-BIWb5uIp.js";const t={};function l(r,e){return n(),i("div",null,e[0]||(e[0]=[a(`The DSF FHIR server implements a subset of the FHIR R4 REST API. When accessing the API with a web browser a limited graphical user interface is shown. Without any additional configuration the API and user interface is only accessible with the X.509 client certificate configured for the organization via the configuration parameter: DEV_DSF_FHIR_SERVER_ORGANIZATION_THUMBPRINT
OpenID Connect
To enable OpenID Connect authentication of local user, see the DSF FHIR server OpenID Connect configuration page.
Access to the API and user interface can be enabled for additional client certificates and local users authenticating via OAuth 2.0 OpenID Connect. Access can be configured for so called roles, with all roles specified using the configuration parameter DEV_DSF_FHIR_SERVER_ROLECONFIG. The value for this environment variable is specified as YAML using the block scalar |
.
The listing below shows a minimal configuration to enable read access for a specific client-certificate:
DEV_DSF_FHIR_SERVER_ROLECONFIG: |
+ - example_read_only_role:
+ thumbprint: 00474993fa261b0225f93c5a66aa6fcc... [a-f0-9]{128}
+ dsf-role:
+ - READ
+ - SEARCH
+ - HISTORY
The list of user roles above contains a single rule-entry example_read_only_role
, matching the user via a client certificate SHA-512 thumbprint and assigning three DSF roles. Any string can be used as the name for the rule-entry.
Certificate Thumbprints
SHA-512 certificate thumbprints in HEX form [a-f0-9]{128}
can be calculated using:
certtool --fingerprint --hash=sha512 --infile=certificate.pem
Multiple user roles can be specified and all matching roles will be applied to an authenticated user. Use an empty string ""
or a single block scalar |
character as the value for the configuration parameter DEV_DSF_FHIR_SERVER_ROLECONFIG if no roles should be configured.
To apply roles, users can be matched via the thumbprint
, email
, token-role
or token-group
properties. A single value or a list of values can be specified.
The property thumbprint
can be used to specify one or multiple SHA-512 certificate thumbprints. Roles from this rule are applied to the authenticating user if the certificate matches one of the specified thumbprints.
Using the property email
users can be matched against e-mail addresses specified in X.509 client certificates and in OpenID Connect access tokens. Values will be matched against e-mail addresses specified in the subject DN (via PKCS#9 extension 1.2.840.113549.1.9.1) and RFC-822 Name entries of the Subject Alternative Name field. If the user authenticates via OpenID Connect, the email
claim from the access token will be matched against the property values.
With the properties token-role
and token-group
role and group names can be specified to match against role and group claims within OAuth 2.0 access tokens.
Two types of roles can be applied to matched users.
DSF roles specified via the dsf-role
property define general access to the REST API and user interface. Allowed values are:
CREATE
, READ
, UPDATE
, DELETE
, SEARCH
, HISTORY
, PERMANENT_DELETE
and WEBSOCKET
.
In order to allow users to start processes, the property practitioner-role
can be used to assign codes from FHIR CodeSystem resources. Codes are specified in the form system-url|code
.
If the user has a code specified here that matches a requester
extension within the process plugin's ActivityDefinition resource, the user can start the process if they also have the dsf-role
CREATE
.
Process plugins can define and use their own code-systems. However, the DSF specifies a standard set of practitioner roles within the CodeSystem http://dsf.dev/fhir/CodeSystem/practitioner-role
:
UAC_USER
, COS_USER
, CRR_USER
, DIC_USER
, DMS_USER
, DTS_USER
, HRP_USER
, TTP_USER
, AMS_USER
and DSF_ADMIN
.
The first example defines a group of DSF administrators. Two client certificates match against this role:
DEV_DSF_FHIR_SERVER_ROLECONFIG: |
+ - certificate-admins:
+ thumbprint:
+ - afb68b1d9d47e691b8b3d50fd9848467cada8b1c76f5f4b45f00c9f8432d505361a3ee27805f4aa06799d9ac8dace94b3f1942fce44d84866961259b13be825d
+ - 2441bfddcad97eeb83c8c31fe181b90652787b8b59bf4e569219da7db4429e389479cb7c4a2f311e34217357d594ecad7d58ccfeef2a9e93c6fcf8d98897d88c
+ dsf-role:
+ - CREATE
+ - READ
+ - UPDATE
+ - DELETE
+ - SEARCH
+ - HISTORY
+ practitioner-role:
+ - http://dsf.dev/fhir/CodeSystem/practitioner-role|DSF_ADMIN
The second example defines a group of DSF administrators by specifying an admin
role that gets matched against OAuth 2.0 access tokens:
DEV_DSF_FHIR_SERVER_ROLECONFIG: |
+ - token-role-admins:
+ token-role: admin
+ dsf-role:
+ - CREATE
+ - READ
+ - UPDATE
+ - DELETE
+ - SEARCH
+ - HISTORY
+ practitioner-role:
+ - http://dsf.dev/fhir/CodeSystem/practitioner-role|DSF_ADMIN
The third example allows read-only access. Two e-mail addresses are used to match this role. E-mail addresses from X.509 client certificates and OAuth 2.0 access tokens are matched:
DEV_DSF_FHIR_SERVER_ROLECONFIG: |
+ - read-only:
+ email:
+ - first.user@test.org
+ - second.user@test.org
+ dsf-role:
+ - READ
+ - SEARCH
+ - HISTORY
The DSF FHIR server implements a subset of the FHIR R4 REST API. When accessing the API with a web browser a limited graphical user interface is shown. Without any additional configuration the API and user interface is only accessible with the X.509 client certificate configured for the organization via the configuration parameter: DEV_DSF_FHIR_SERVER_ORGANIZATION_THUMBPRINT
"}');export{h as comp,o as data}; diff --git a/assets/access-control.html-WxdBljNr.js b/assets/access-control.html-WxdBljNr.js new file mode 100644 index 000000000..03c9141b2 --- /dev/null +++ b/assets/access-control.html-WxdBljNr.js @@ -0,0 +1,21 @@ +import{_ as i}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as s,a,o as n}from"./app-BIWb5uIp.js";const t={};function l(r,e){return n(),s("div",null,e[0]||(e[0]=[a(`The DSF BPE server provides a user interface for administrators. Without any additional configuration the user interface is not accessible with the organizations X.509 client certificate or any other certificate or OpenID Connect authenticated user.
OpenID Connect
To enable OpenID Connect authentication of local user, see the DSF BPE server OpenID Connect configuration page.
Access to the user interface can be enabled for client certificates and local users authenticating via OAuth 2.0 OpenID Connect. Access can be configured for so called roles, with all roles specified using the configuration parameter DEV_DSF_BPE_SERVER_ROLECONFIG. The value for this environment variable is specified as YAML using the block scalar |
.
The listing below shows a minimal configuration to enable access for a specific client-certificate:
DEV_DSF_BPE_SERVER_ROLECONFIG: |
+ - example_read_only_role:
+ thumbprint: 00474993fa261b0225f93c5a66aa6fcc... [a-f0-9]{128}
+ dsf-role:
+ - ADMIN
The list of user roles above contains a single rule-entry example_read_only_role
, matching the user via a client certificate SHA-512 thumbprint and assigning a single DSF role. Any string can be used as the name for the rule-entry.
Certificate Thumbprints
SHA-512 certificate thumbprints in HEX form [a-f0-9]{128}
can be calculated using:
certtool --fingerprint --hash=sha512 --infile=certificate.pem
Multiple user roles can be specified and all matching roles will be applied to an authenticated user. Use an empty string ""
or a single block scalar |
character as the value for the configuration parameter DEV_DSF_BPE_SERVER_ROLECONFIG if no roles should be configured.
To apply roles, users can be matched via the thumbprint
, email
, token-role
or token-group
properties. A single value or a list of values can be specified.
The property thumbprint
can be used to specify one or multiple SHA-512 certificate thumbprints. Roles from this rule are applied to the authenticating user if the certificate matches one of the specified thumbprints.
Using the property email
users can be matched against e-mail addresses specified in X.509 client certificates and in OpenID Connect access tokens. Values will be matched against e-mail addresses specified in the subject DN (via PKCS#9 extension 1.2.840.113549.1.9.1) and RFC-822 Name entries of the Subject Alternative Name field. If the user authenticates via OpenID Connect, the email
claim from the access token will be matched against the property values.
With the properties token-role
and token-group
role and group names can be specified to match against role and group claims within OAuth 2.0 access tokens.
Two types of roles can be applied to matched users.
DSF roles specified via the dsf-role
property define general access to the user interface. Allowed values are:
ADMIN
.
The BPE server currently does not support any practitioner-roles.
The first example defines a group of DSF administrators. Two client certificates match against this role:
DEV_DSF_BPE_SERVER_ROLECONFIG: |
+ - certificate-admins:
+ thumbprint:
+ - afb68b1d9d47e691b8b3d50fd9848467cada8b1c76f5f4b45f00c9f8432d505361a3ee27805f4aa06799d9ac8dace94b3f1942fce44d84866961259b13be825d
+ - 2441bfddcad97eeb83c8c31fe181b90652787b8b59bf4e569219da7db4429e389479cb7c4a2f311e34217357d594ecad7d58ccfeef2a9e93c6fcf8d98897d88c
+ dsf-role:
+ - ADMIN
The second example defines a group of DSF administrators by specifying an admin
role that gets matched against OAuth 2.0 access tokens:
DEV_DSF_BPE_SERVER_ROLECONFIG: |
+ - token-role-admins:
+ token-role: admin
+ dsf-role:
+ - ADMIN
The third example allows administrator access and users e-mail addresses to match this role. E-mail addresses from X.509 client certificates and OAuth 2.0 access tokens are matched:
DEV_DSF_BPE_SERVER_ROLECONFIG: |
+ - email-admins:
+ email:
+ - first.user@test.org
+ - second.user@test.org
+ dsf-role:
+ - ADMIN
The DSF BPE server provides a user interface for administrators. Without any additional configuration the user interface is not accessible with the organizations X.509 client certificate or any other certificate or OpenID Connect authenticated user.
\\nOpenID Connect
\\nTo enable OpenID Connect authentication of local user, see the DSF BPE server OpenID Connect configuration page.
\\nThis wiki entry walks through the steps to add a new FHIR resource to the server
dsf-fhir-server > resources > db
copy/paste one file and change content to new resource (don't forget to allow permanent deletes like in db.questionnaires.changelog-0.6.0.xml
)dsf-fhir-server > resources > db > db.changelog.xml
include new generated db changelog file (are in alphabetic order)dsf-fhir-server > resources > db > trigger_functions
copy/paste existing functions for insert/update triggers and change content to new resourcedsf-fhir-server > resources > db > db.read_access.changelog.xml
include new generated functions for insert/update triggers (are in alphabetic order)dsf-fhir-rest-adapter > java
copy/paste existing adapter for json/xml/html and change content to new resourcedsf-fhir-webservice-client > java > FhirWebserviceClientJersey.java
register generated json/xml (not html) adapters according to existing registrations (are in alphabetic order)dsf-fhir-server > java > dao
copy/paste dao interface and change content to new resourcedsf-fhir-server > java > search > parameters
copy/paste a search parameter class based on type of the parameter (extends abstract search parameter type) and change content to new resource (existing search parameters can be found on the resources specification website)dsf-fhir-server > java > search > parameters > rev > include
copy/paste rev include class and adapt content to resource to be rev includeddsf-fhir-server > java > search > parameters > user
copy/paste a search user filter class and adapt content to new resourcedsf-fhir-server > java > dao > jdbc copy/paste dao jdbc
class and adapt content (add search user filter and parameter)dsf-fhir-server > java > spring > config > DaoConfig.java
register bean by adding a method according to existing resource bean methods (are in alphabetic order) and add it in the `daoProvider()´ methoddsf-fhir-server > java > spring > config > EventConfig.java
add the new created bean from the DaoConfig
to the MatcherFactory
according to existing dao's (are in alphabetic order)dsf-fhir-server > java > dao > provider > DaoProvider.java
add a method similar to the existing onedsf-fhir-server > java > dao > provider > DaoProviderImpl.java
adapt the class according to the other resource and implement the method similar to the existing onedsf-fhir-rest-adapter > java > service > ReferenceExtractor.java
add a method similar to the existing onesdsf-fhir-rest-adapter > java > service > ReferenceExtractorImpl.java
implement the method similar to the existing onesdsf-fhir-server > java > authorization
copy/paste class and change content to new resourcedsf-fhir-server > java > spring > config > AuthorizationConfig.java
register bean by adding a method according to existing resource bean methods (are in alphabetic order) and add it in the authorizationRuleProvider()
and the binaryAuthorizationRule()
methoddsf-fhir-server > java > webservice > specification
copy/paste interface and change content to new resourcedsf-fhir-server > java > webservice > impl
copy/paste class and change content to new resourcedsf-fhir-server > java > webservice > jaxrs
copy/paste class and change content to new resourcedsf-fhir-server > java > webservice > secure
copy/paste class and change content to new resourcedsf-fhir-server > java > webservice > impl > ConformanceServiceImpl
add new Resource to list and add Search Parameter created abovedsf-fhir-server > java > spring > config > WebserviceConfig.java
register bean by adding a method according to existing resource bean methods (are in alphabetic order)dsf-fhir-server > test-java > dao
copy/paste class and change tests to new resourcedsf-fhir-server > test-java > integration
copy/paste class and change tests to new resource and above create search parametersThis wiki entry walks through the steps to add a new FHIR resource to the server
\\ndsf-fhir-server > resources > db
copy/past one file and change content to new resource (don't forget to allow permanent deletes like in db.questionnaires.changelog-0.6.0.xml
)dsf-fhir-server > resources > db > db.changelog.xml
include new generated db changelog file (are in alphabetic order)dsf-fhir-server > resources > db > trigger_functions
copy/paste existing functions for insert/update triggers and change content to new resourcedsf-fhir-server > resources > db > db.read_access.changelog.xml
include new generated functions for insert/update triggers (are in alphabetic order)The Master Patient Index (MPI) client that will be used by the Business Process Engine (BPE) is determined by the property org.highmed.dsf.bpe.mpi.webservice.factory.class
and loaded using a service loader, which searches for a class of type MasterPatientIndexClientFactory
on startup of the BPE.
The framework currently includes an MPI client using the IHE PDQ interface, also supporting client certificate authentication. To use it, add the jar of the dsf-mpi-client-pdq
module to the plugin
configuration folder and set the property value to org.highmed.mpi.client.pdq.MasterPatientIndexClientPdqFactory
.
To implement a new MPI client, the following has to be taken into account:
MasterPatientIndexClientFactory
from the dsf-mpi-client
module.META-INF/services/org.highmed.mpi.client.MasterPatientIndexClientFactory
containing the name of the new MPI client factory including the full package name.MasterPatientIndexClient
. The interface defines a method returning instances of the interface Idat
based on patient-ids used within the openEHR repository.An example of an MPI client implementation can be found in the dsf-mpi-client-pdq
module.
The Master Patient Index (MPI) client that will be used by the Business Process Engine (BPE) is determined by the property org.highmed.dsf.bpe.mpi.webservice.factory.class
and loaded using a service loader, which searches for a class of type MasterPatientIndexClientFactory
on startup of the BPE.
The openEHR client that will be used by the Business Process Engine (BPE) is determined by the property org.highmed.dsf.bpe.openehr.webservice.factory.class
and loaded using a service loader, which searches for a class of type OpenEhrClientFactory
on startup of the BPE.
The framework currently includes an openEHR Jersey REST client using basic authentication. To use it, add the jar of the dsf-openehr-client-impl
module to the plugin
configuration folder and set the property value to org.highmed.openehr.client.impl.OpenEhrClientJerseyFactory
.
To implement a new openEHR client, the following has to be taken into account:
OpenEhrClientFactory
from the dsf-openehr-client
module.META-INF/services/org.highmed.openehr.client.OpenEhrClientFactory
containing the name of the new openehr client factory including the full package name.OpenEhrClient
. The interface defines a method executing an openEHR query returning an instance of an openEHR ResultSet
.An example of an openEHR client implementation can be found in the dsf-openehr-client-impl
module.
The openEHR client that will be used by the Business Process Engine (BPE) is determined by the property org.highmed.dsf.bpe.openehr.webservice.factory.class
and loaded using a service loader, which searches for a class of type OpenEhrClientFactory
on startup of the BPE.
To simplify the DSF Allow List Management we have built a portal for administration. The portal is managed by the GECKO Institute at Heilbronn University. You as an DSF administrator can create or update your Allow List information. The information you provide on this portal will be transferred to us and will be used to built Allow List bundles that get distributed to the communication partners of the distributed processes.
The DSF Allow List management tool uses client certificates for authentication. You can either use a personal client certificate or the client certificate from your DSF BPE, which needs to be added to your web-browsers certificate store.
my-hospital.de
https://dsf.my-hospital.de/fhir
When you have fulfilled all the prerequisites, you can start managing your Allow Lists via the environment specific Allow List Management Tool:
We use different highlight colors for the DSF Allow List Management Tool: Green for the Test environment and blue for the Production infrastructure. To access the site, you have to authenticate yourself with a client certificate. Your web-browser will show a dialog to choose a valid certificate.
Ideas for improvement?
Have you found an error or is something unclear to you? Then please feel free to contact us on the MII-Zulip Channel or write us at gth-gecko@hs-heilbronn.de. Thank you very much!
Caution
\\nThis is an outdated version of the Allow List Management documentation. Please use the current version, even if you use an outdated DSF version.
\\nYou can read all about the concept of Allow Lists in our introduction.
"}');export{g as comp,v as data}; diff --git a/assets/allowList-mgm.html-CIu0ylPp.js b/assets/allowList-mgm.html-CIu0ylPp.js new file mode 100644 index 000000000..703d82842 --- /dev/null +++ b/assets/allowList-mgm.html-CIu0ylPp.js @@ -0,0 +1 @@ +import{_ as a}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as r,b as t,d as i,e as n,f as s,a as l,r as h,o as c}from"./app-BIWb5uIp.js";const u={};function d(f,e){const o=h("RouteLink");return c(),r("div",null,[e[3]||(e[3]=t("div",{class:"hint-container caution"},[t("p",{class:"hint-container-title"},"Caution"),t("p",null,[i("This is an outdated version of the Allow List Management documentation. Please use "),t("a",{href:"/stable/maintain/allowList-mgm"},"the current version"),i(", even if you use an outdated DSF version.")])],-1)),t("p",null,[e[1]||(e[1]=i("You can read all about the concept of Allow Lists ")),n(o,{to:"/intro/info/allowList.html"},{default:s(()=>e[0]||(e[0]=[i("in our introduction")])),_:1}),e[2]||(e[2]=i("."))]),e[4]||(e[4]=l('To simplify the DSF Allow List Management we have built a portal for administration. The portal is managed by the GECKO Institute at Heilbronn University. You as an DSF administrator can create or update your Allow List information. The information you provide on this portal will be transferred to us and will be used to built Allow List bundles that get distributed to the communication partners of the distributed processes.
The DSF Allow List management tool uses client certificates for authentication. You can either use a personal client certificate or the client certificate from your DSF BPE, which needs to be added to your web-browsers certificate store.
my-hospital.de
https://dsf.my-hospital.de/fhir
When you have fulfilled all the prerequisites, you can start managing your Allow Lists via the environment specific Allow List Management Tool:
We use different highlight colors for the DSF Allow List Management Tool: Green for the Test environment and blue for the Production infrastructure. To access the site, you have to authenticate yourself with a client certificate. Your web-browser will show a dialog to choose a valid certificate.
Ideas for improvement?
Have you found an error or is something unclear to you? Then please feel free to contact us on the MII-Zulip Channel or write us at gth-gecko@hs-heilbronn.de. Thank you very much!
Caution
\\nThis is an outdated version of the Allow List Management documentation. Please use the current version, even if you use an outdated DSF version.
\\nYou can read all about the concept of Allow Lists in our introduction.
"}');export{g as comp,v as data}; diff --git a/assets/allowList-mgm.html-CQIC2n1m.js b/assets/allowList-mgm.html-CQIC2n1m.js new file mode 100644 index 000000000..0943eba37 --- /dev/null +++ b/assets/allowList-mgm.html-CQIC2n1m.js @@ -0,0 +1 @@ +import{_ as o}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as n,b as t,d as i,e as r,f as s,a as l,r as h,o as c}from"./app-BIWb5uIp.js";const u={};function d(f,e){const a=h("RouteLink");return c(),n("div",null,[e[3]||(e[3]=t("div",{class:"hint-container caution"},[t("p",{class:"hint-container-title"},"Caution"),t("p",null,[i("This is an outdated version of the Allow List Management documentation. Please use "),t("a",{href:"/stable/maintain/allowList-mgm"},"the current version"),i(", even if you use an outdated DSF version.")])],-1)),t("p",null,[e[1]||(e[1]=i("You can read all about the concept of Allow Lists ")),r(a,{to:"/intro/info/allowList.html"},{default:s(()=>e[0]||(e[0]=[i("here")])),_:1}),e[2]||(e[2]=i("."))]),e[4]||(e[4]=l('To simplify the DSF Allow List Management we have built a portal for administration. The portal is managed by the Gecko Institute at Heilbronn University. You as an DSF administrator can create or update your Allow List information. The information you provide on this portal will be transferred to us and will be used to built Allow List bundles that get distributed to the communication partners of the distributed processes.
The DSF Allow List management tool uses client certificates for authentication. You can either use your personal client certificate or the client certificate from your DSF BPE, which needs to be added to your webbrowser. For instructions on how to add a client certificate to your browser, please follow here.
Currently, the Allow List Management Tool is only for DSF TEST infrastructure. For production, please write us an E-Mail with your information.
When you have fulfilled all the prerequisites, you can start managing your Allow Lists on the DSF Allow List Management Tool.
At the beginning, a popup will appear where you have to select your certificate. Only then you will have access to the website.
Ideas for improvement?
Have you found an error or is something unclear to you? Then please feel free to contact us on the MII-Zulip Channel or write us at gth-gecko@hs-heilbronn.de. Thank you very much!
Caution
\\nThis is an outdated version of the Allow List Management documentation. Please use the current version, even if you use an outdated DSF version.
\\nYou can read all about the concept of Allow Lists here.
"}');export{w as comp,g as data}; diff --git a/assets/allowList-mgm.html-CZLnHuTV.js b/assets/allowList-mgm.html-CZLnHuTV.js new file mode 100644 index 000000000..73f382d43 --- /dev/null +++ b/assets/allowList-mgm.html-CZLnHuTV.js @@ -0,0 +1 @@ +import{_ as a}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as r,b as t,d as i,e as n,f as s,a as l,r as h,o as c}from"./app-BIWb5uIp.js";const u={};function d(f,e){const o=h("RouteLink");return c(),r("div",null,[e[3]||(e[3]=t("div",{class:"hint-container caution"},[t("p",{class:"hint-container-title"},"Caution"),t("p",null,[i("This is an outdated version of the Allow List Management documentation. Please use "),t("a",{href:"/stable/maintain/allowList-mgm"},"the current version"),i(", even if you use an outdated DSF version.")])],-1)),t("p",null,[e[1]||(e[1]=i("You can read all about the concept of Allow Lists ")),n(o,{to:"/intro/info/allowList.html"},{default:s(()=>e[0]||(e[0]=[i("in our introduction")])),_:1}),e[2]||(e[2]=i("."))]),e[4]||(e[4]=l('To simplify the DSF Allow List Management we have built a portal for administration. The portal is managed by the GECKO Institute at Heilbronn University. You as an DSF administrator can create or update your Allow List information. The information you provide on this portal will be transferred to us and will be used to built Allow List bundles that get distributed to the communication partners of the distributed processes.
The DSF Allow List management tool uses client certificates for authentication. You can either use a personal client certificate or the client certificate from your DSF BPE, which needs to be added to your web-browsers certificate store.
my-hospital.de
https://dsf.my-hospital.de/fhir
When you have fulfilled all the prerequisites, you can start managing your Allow Lists via the environment specific Allow List Management Tool:
We use different highlight colors for the DSF Allow List Management Tool: Green for the Test environment and blue for the Production infrastructure. To access the site, you have to authenticate yourself with a client certificate. Your web-browser will show a dialog to choose a valid certificate.
Ideas for improvement?
Have you found an error or is something unclear to you? Then please feel free to contact us on the MII-Zulip Channel or write us at gth-gecko@hs-heilbronn.de. Thank you very much!
Caution
\\nThis is an outdated version of the Allow List Management documentation. Please use the current version, even if you use an outdated DSF version.
\\nYou can read all about the concept of Allow Lists in our introduction.
"}');export{g as comp,v as data}; diff --git a/assets/allowList-mgm.html-Cbz8uLvU.js b/assets/allowList-mgm.html-Cbz8uLvU.js new file mode 100644 index 000000000..18df70cd3 --- /dev/null +++ b/assets/allowList-mgm.html-Cbz8uLvU.js @@ -0,0 +1 @@ +import{_ as a}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as r,b as t,d as i,e as n,f as s,a as l,r as h,o as c}from"./app-BIWb5uIp.js";const u={};function d(f,e){const o=h("RouteLink");return c(),r("div",null,[e[3]||(e[3]=t("div",{class:"hint-container caution"},[t("p",{class:"hint-container-title"},"Caution"),t("p",null,[i("This is an outdated version of the Allow List Management documentation. Please use "),t("a",{href:"/stable/maintain/allowList-mgm"},"the current version"),i(", even if you use an outdated DSF version.")])],-1)),t("p",null,[e[1]||(e[1]=i("You can read all about the concept of Allow Lists ")),n(o,{to:"/intro/info/allowList.html"},{default:s(()=>e[0]||(e[0]=[i("in our introduction")])),_:1}),e[2]||(e[2]=i("."))]),e[4]||(e[4]=l('To simplify the DSF Allow List Management we have built a portal for administration. The portal is managed by the GECKO Institute at Heilbronn University. You as an DSF administrator can create or update your Allow List information. The information you provide on this portal will be transferred to us and will be used to built Allow List bundles that get distributed to the communication partners of the distributed processes.
The DSF Allow List management tool uses client certificates for authentication. You can either use a personal client certificate or the client certificate from your DSF BPE, which needs to be added to your web-browsers certificate store.
my-hospital.de
https://dsf.my-hospital.de/fhir
When you have fulfilled all the prerequisites, you can start managing your Allow Lists via the environment specific Allow List Management Tool:
We use different highlight colors for the DSF Allow List Management Tool: Green for the Test environment and blue for the Production infrastructure. To access the site, you have to authenticate yourself with a client certificate. Your web-browser will show a dialog to choose a valid certificate.
Ideas for improvement?
Have you found an error or is something unclear to you? Then please feel free to contact us on the MII-Zulip Channel or write us at gth-gecko@hs-heilbronn.de. Thank you very much!
Caution
\\nThis is an outdated version of the Allow List Management documentation. Please use the current version, even if you use an outdated DSF version.
\\nYou can read all about the concept of Allow Lists in our introduction.
"}');export{g as comp,v as data}; diff --git a/assets/allowList-mgm.html-DQ0SCXIv.js b/assets/allowList-mgm.html-DQ0SCXIv.js new file mode 100644 index 000000000..2cf83afec --- /dev/null +++ b/assets/allowList-mgm.html-DQ0SCXIv.js @@ -0,0 +1 @@ +import{_ as r}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as o,b as a,d as t,e as n,f as s,a as l,r as h,o as c}from"./app-BIWb5uIp.js";const u={};function d(f,e){const i=h("RouteLink");return c(),o("div",null,[a("p",null,[e[1]||(e[1]=t("You can read all about the concept of Allow Lists ")),n(i,{to:"/intro/info/allowList.html"},{default:s(()=>e[0]||(e[0]=[t("in our introduction")])),_:1}),e[2]||(e[2]=t("."))]),e[3]||(e[3]=l('To simplify the DSF Allow List Management we have built a portal for administration. The portal is managed by the GECKO Institute at Heilbronn University. You as an DSF administrator can create or update your Allow List information. The information you provide on this portal will be transferred to us and will be used to built Allow List bundles that get distributed to the communication partners of the distributed processes.
The DSF Allow List management tool uses client certificates for authentication. You can either use a personal client certificate or the client certificate from your DSF BPE, which needs to be added to your web-browsers certificate store.
my-hospital.de
https://dsf.my-hospital.de/fhir
When you have fulfilled all the prerequisites, you can start managing your Allow Lists via the environment specific Allow List Management Tool:
We use different highlight colors for the DSF Allow List Management Tool: Green for the Test environment and blue for the Production infrastructure. To access the site, you have to authenticate yourself with a client certificate. Your web-browser will show a dialog to choose a valid certificate.
Ideas for improvement?
Have you found an error or is something unclear to you? Then please feel free to contact us on the MII-Zulip Channel or write us at gth-gecko@hs-heilbronn.de. Thank you very much!
You can read all about the concept of Allow Lists in our introduction.
\\nTo simplify the DSF Allow List Management we have built a portal for administration. The portal is managed by the GECKO Institute at Heilbronn University. You as an DSF administrator can create or update your Allow List information. The information you provide on this portal will be transferred to us and will be used to built Allow List bundles that get distributed to the communication partners of the distributed processes.
"}');export{g as comp,w as data}; diff --git a/assets/allowList-mgm.html-DesR31sE.js b/assets/allowList-mgm.html-DesR31sE.js new file mode 100644 index 000000000..d10249b1c --- /dev/null +++ b/assets/allowList-mgm.html-DesR31sE.js @@ -0,0 +1 @@ +import{_ as o}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as n,b as t,d as i,e as r,f as s,a as l,r as h,o as c}from"./app-BIWb5uIp.js";const u={};function d(f,e){const a=h("RouteLink");return c(),n("div",null,[e[3]||(e[3]=t("div",{class:"hint-container caution"},[t("p",{class:"hint-container-title"},"Caution"),t("p",null,[i("This is an outdated version of the Allow List Management documentation. Please use "),t("a",{href:"/stable/maintain/allowList-mgm"},"the current version"),i(", even if you use an outdated DSF version.")])],-1)),t("p",null,[e[1]||(e[1]=i("You can read all about the concept of Allow Lists ")),r(a,{to:"/intro/info/allowList.html"},{default:s(()=>e[0]||(e[0]=[i("here")])),_:1}),e[2]||(e[2]=i("."))]),e[4]||(e[4]=l('To simplify the DSF Allow List Management we have built a portal for administration. The portal is managed by the Gecko Institute at Heilbronn University. You as an DSF administrator can create or update your Allow List information. The information you provide on this portal will be transferred to us and will be used to built Allow List bundles that get distributed to the communication partners of the distributed processes.
The DSF Allow List management tool uses client certificates for authentication. You can either use your personal client certificate or the client certificate from your DSF BPE, which needs to be added to your webbrowser. For instructions on how to add a client certificate to your browser, please follow here.
Currently, the Allow List Management Tool is only for DSF TEST infrastructure. For production, please write us an E-Mail with your information.
When you have fulfilled all the prerequisites, you can start managing your Allow Lists on the DSF Allow List Management Tool.
At the beginning, a popup will appear where you have to select your certificate. Only then you will have access to the website.
Ideas for improvement?
Have you found an error or is something unclear to you? Then please feel free to contact us on the MII-Zulip Channel or write us at gth-gecko@hs-heilbronn.de. Thank you very much!
Caution
\\nThis is an outdated version of the Allow List Management documentation. Please use the current version, even if you use an outdated DSF version.
\\nYou can read all about the concept of Allow Lists here.
"}');export{w as comp,g as data}; diff --git a/assets/allowList-mgm.html-DqHlTqXK.js b/assets/allowList-mgm.html-DqHlTqXK.js new file mode 100644 index 000000000..eb35b9c1f --- /dev/null +++ b/assets/allowList-mgm.html-DqHlTqXK.js @@ -0,0 +1 @@ +import{_ as r}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as o,b as a,d as t,e as n,f as s,a as l,r as h,o as c}from"./app-BIWb5uIp.js";const u={};function d(f,e){const i=h("RouteLink");return c(),o("div",null,[a("p",null,[e[1]||(e[1]=t("You can read all about the concept of Allow Lists ")),n(i,{to:"/intro/info/allowList.html"},{default:s(()=>e[0]||(e[0]=[t("in our introduction")])),_:1}),e[2]||(e[2]=t("."))]),e[3]||(e[3]=l('To simplify the DSF Allow List Management we have built a portal for administration. The portal is managed by the GECKO Institute at Heilbronn University. You as an DSF administrator can create or update your Allow List information. The information you provide on this portal will be transferred to us and will be used to built Allow List bundles that get distributed to the communication partners of the distributed processes.
The DSF Allow List management tool uses client certificates for authentication. You can either use a personal client certificate or the client certificate from your DSF BPE, which needs to be added to your web-browsers certificate store.
my-hospital.de
https://dsf.my-hospital.de/fhir
When you have fulfilled all the prerequisites, you can start managing your Allow Lists via the environment specific Allow List Management Tool:
We use different highlight colors for the DSF Allow List Management Tool: Green for the Test environment and blue for the Production infrastructure. To access the site, you have to authenticate yourself with a client certificate. Your web-browser will show a dialog to choose a valid certificate.
Ideas for improvement?
Have you found an error or is something unclear to you? Then please feel free to contact us on the MII-Zulip Channel or write us at gth-gecko@hs-heilbronn.de. Thank you very much!
You can read all about the concept of Allow Lists in our introduction.
\\nTo simplify the DSF Allow List Management we have built a portal for administration. The portal is managed by the GECKO Institute at Heilbronn University. You as an DSF administrator can create or update your Allow List information. The information you provide on this portal will be transferred to us and will be used to built Allow List bundles that get distributed to the communication partners of the distributed processes.
"}');export{g as comp,w as data}; diff --git a/assets/allowList-mgm.html-IMzRqiGI.js b/assets/allowList-mgm.html-IMzRqiGI.js new file mode 100644 index 000000000..059ea87bd --- /dev/null +++ b/assets/allowList-mgm.html-IMzRqiGI.js @@ -0,0 +1 @@ +import{_ as a}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as r,b as t,d as i,e as n,f as s,a as l,r as h,o as c}from"./app-BIWb5uIp.js";const u={};function d(f,e){const o=h("RouteLink");return c(),r("div",null,[e[3]||(e[3]=t("div",{class:"hint-container caution"},[t("p",{class:"hint-container-title"},"Caution"),t("p",null,[i("This is an outdated version of the Allow List Management documentation. Please use "),t("a",{href:"/stable/maintain/allowList-mgm"},"the current version"),i(", even if you use an outdated DSF version.")])],-1)),t("p",null,[e[1]||(e[1]=i("You can read all about the concept of Allow Lists ")),n(o,{to:"/intro/info/allowList.html"},{default:s(()=>e[0]||(e[0]=[i("in our introduction")])),_:1}),e[2]||(e[2]=i("."))]),e[4]||(e[4]=l('To simplify the DSF Allow List Management we have built a portal for administration. The portal is managed by the GECKO Institute at Heilbronn University. You as an DSF administrator can create or update your Allow List information. The information you provide on this portal will be transferred to us and will be used to built Allow List bundles that get distributed to the communication partners of the distributed processes.
The DSF Allow List management tool uses client certificates for authentication. You can either use a personal client certificate or the client certificate from your DSF BPE, which needs to be added to your web-browsers certificate store.
my-hospital.de
https://dsf.my-hospital.de/fhir
When you have fulfilled all the prerequisites, you can start managing your Allow Lists via the environment specific Allow List Management Tool:
We use different highlight colors for the DSF Allow List Management Tool: Green for the Test environment and blue for the Production infrastructure. To access the site, you have to authenticate yourself with a client certificate. Your web-browser will show a dialog to choose a valid certificate.
Ideas for improvement?
Have you found an error or is something unclear to you? Then please feel free to contact us on the MII-Zulip Channel or write us at gth-gecko@hs-heilbronn.de. Thank you very much!
Caution
\\nThis is an outdated version of the Allow List Management documentation. Please use the current version, even if you use an outdated DSF version.
\\nYou can read all about the concept of Allow Lists in our introduction.
"}');export{g as comp,v as data}; diff --git a/assets/allowList-mgm.html-RGpFQJqM.js b/assets/allowList-mgm.html-RGpFQJqM.js new file mode 100644 index 000000000..8c07b2dbc --- /dev/null +++ b/assets/allowList-mgm.html-RGpFQJqM.js @@ -0,0 +1 @@ +import{_ as n}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as r,b as t,d as i,e as a,f as s,a as l,r as h,o as c}from"./app-BIWb5uIp.js";const u={};function d(p,e){const o=h("RouteLink");return c(),r("div",null,[e[3]||(e[3]=t("div",{class:"hint-container caution"},[t("p",{class:"hint-container-title"},"Caution"),t("p",null,[i("This is an outdated version of the Allow List Management documentation. Please use "),t("a",{href:"/stable/maintain/allowList-mgm"},"the current version"),i(", even if you use an outdated DSF version.")])],-1)),t("p",null,[e[1]||(e[1]=i("You can read all about the concept of Allow Lists ")),a(o,{to:"/intro/info/allowList.html"},{default:s(()=>e[0]||(e[0]=[i("in our introduction")])),_:1}),e[2]||(e[2]=i("."))]),e[4]||(e[4]=l('To simplify the DSF Allow List Management we have built a portal for administration. The portal is managed by the Gecko Institute at Heilbronn University. You as an DSF administrator can create or update your Allow List information. The information you provide on this portal will be transferred to us and will be used to built Allow List bundles that get distributed to the communication partners of the distributed processes.
The DSF Allow List management tool uses client certificates for authentication. You can either use your personal client certificate or the client certificate from your DSF BPE, which needs to be added to your webbrowser. For instructions on how to add a client certificate to your browser, please follow here.
When you have fulfilled all the prerequisites, you can start managing your Allow Lists respective Allow List Management Tool.
Click here to open the DSF Allow List Management Tool for the Test infrastructure.
Click here to open the DSF Allow List Management Tool for the Production infrastructure.
We use different colors for the DSF Allow List Management Tools in the Test (green) and Production (blue) infastructure.
At the beginning, a popup will appear where you have to select your certificate. Only then you will have access to the website.
Ideas for improvement?
Have you found an error or is something unclear to you? Then please feel free to contact us on the MII-Zulip Channel or write us at gth-gecko@hs-heilbronn.de. Thank you very much!
Caution
\\nThis is an outdated version of the Allow List Management documentation. Please use the current version, even if you use an outdated DSF version.
\\nYou can read all about the concept of Allow Lists in our introduction.
"}');export{g as comp,w as data}; diff --git a/assets/allowList-mgm.html-bG96t5GT.js b/assets/allowList-mgm.html-bG96t5GT.js new file mode 100644 index 000000000..0e263ea36 --- /dev/null +++ b/assets/allowList-mgm.html-bG96t5GT.js @@ -0,0 +1 @@ +import{_ as r}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as o,b as a,d as t,e as n,f as s,a as l,r as h,o as c}from"./app-BIWb5uIp.js";const u={};function d(f,e){const i=h("RouteLink");return c(),o("div",null,[a("p",null,[e[1]||(e[1]=t("You can read all about the concept of Allow Lists ")),n(i,{to:"/intro/info/allowList.html"},{default:s(()=>e[0]||(e[0]=[t("in our introduction")])),_:1}),e[2]||(e[2]=t("."))]),e[3]||(e[3]=l('To simplify the DSF Allow List Management we have built a portal for administration. The portal is managed by the GECKO Institute at Heilbronn University. You as an DSF administrator can create or update your Allow List information. The information you provide on this portal will be transferred to us and will be used to built Allow List bundles that get distributed to the communication partners of the distributed processes.
The DSF Allow List management tool uses client certificates for authentication. You can either use a personal client certificate or the client certificate from your DSF BPE, which needs to be added to your web-browsers certificate store.
my-hospital.de
https://dsf.my-hospital.de/fhir
When you have fulfilled all the prerequisites, you can start managing your Allow Lists via the environment specific Allow List Management Tool:
We use different highlight colors for the DSF Allow List Management Tool: Green for the Test environment and blue for the Production infrastructure. To access the site, you have to authenticate yourself with a client certificate. Your web-browser will show a dialog to choose a valid certificate.
Ideas for improvement?
Have you found an error or is something unclear to you? Then please feel free to contact us on the MII-Zulip Channel or write us at gth-gecko@hs-heilbronn.de. Thank you very much!
You can read all about the concept of Allow Lists in our introduction.
\\nTo simplify the DSF Allow List Management we have built a portal for administration. The portal is managed by the GECKO Institute at Heilbronn University. You as a DSF administrator can create or update your Allow List information. The information you provide on this portal will be transferred to us and will be used to build Allow List bundles that get distributed to the communication partners of the distributed processes.
"}');export{g as comp,w as data}; diff --git a/assets/allowList-mgm.html-kVH8jHoC.js b/assets/allowList-mgm.html-kVH8jHoC.js new file mode 100644 index 000000000..690fe1d26 --- /dev/null +++ b/assets/allowList-mgm.html-kVH8jHoC.js @@ -0,0 +1 @@ +import{_ as o}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as n,b as t,d as i,e as r,f as s,a as l,r as h,o as c}from"./app-BIWb5uIp.js";const u={};function d(f,e){const a=h("RouteLink");return c(),n("div",null,[e[3]||(e[3]=t("div",{class:"hint-container caution"},[t("p",{class:"hint-container-title"},"Caution"),t("p",null,[i("This is an outdated version of the Allow List Management documentation. Please use "),t("a",{href:"/stable/maintain/allowList-mgm"},"the current version"),i(", even if you use an outdated DSF version.")])],-1)),t("p",null,[e[1]||(e[1]=i("You can read all about the concept of Allow Lists ")),r(a,{to:"/intro/info/allowList.html"},{default:s(()=>e[0]||(e[0]=[i("here")])),_:1}),e[2]||(e[2]=i("."))]),e[4]||(e[4]=l('To simplify the DSF Allow List Management we have built a portal for administration. The portal is managed by the Gecko Institute at Heilbronn University. You as an DSF administrator can create or update your Allow List information. The information you provide on this portal will be transferred to us and will be used to built Allow List bundles that get distributed to the communication partners of the distributed processes.
The DSF Allow List management tool uses client certificates for authentication. You can either use your personal client certificate or the client certificate from your DSF BPE, which needs to be added to your web browser. For instructions on how to add a client certificate to your browser, please see here.
Currently, the Allow List Management Tool is only for DSF TEST infrastructure. For production, please write us an E-Mail with your information.
When you have fulfilled all the prerequisites, you can start managing your Allow Lists on the DSF Allow List Management Tool.
At the beginning, a popup will appear where you have to select your certificate. Only then you will have access to the website.
Ideas for improvement?
Have you found an error or is something unclear to you? Then please feel free to contact us on the MII-Zulip Channel or write us at gth-gecko@hs-heilbronn.de. Thank you very much!
Caution
\\nThis is an outdated version of the Allow List Management documentation. Please use the current version, even if you use an outdated DSF version.
\\nYou can read all about the concept of Allow Lists here.
"}');export{w as comp,g as data}; diff --git a/assets/allowList-mgm.html-zpJcNu7t.js b/assets/allowList-mgm.html-zpJcNu7t.js new file mode 100644 index 000000000..917c28075 --- /dev/null +++ b/assets/allowList-mgm.html-zpJcNu7t.js @@ -0,0 +1 @@ +import{_ as a}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as r,b as t,d as i,e as n,f as s,a as l,r as h,o as c}from"./app-BIWb5uIp.js";const u={};function d(f,e){const o=h("RouteLink");return c(),r("div",null,[e[3]||(e[3]=t("div",{class:"hint-container caution"},[t("p",{class:"hint-container-title"},"Caution"),t("p",null,[i("This is an outdated version of the Allow List Management documentation. Please use "),t("a",{href:"/stable/maintain/allowList-mgm"},"the current version"),i(", even if you use an outdated DSF version.")])],-1)),t("p",null,[e[1]||(e[1]=i("You can read all about the concept of Allow Lists ")),n(o,{to:"/intro/info/allowList.html"},{default:s(()=>e[0]||(e[0]=[i("in our introduction")])),_:1}),e[2]||(e[2]=i("."))]),e[4]||(e[4]=l('To simplify the DSF Allow List Management we have built a portal for administration. The portal is managed by the GECKO Institute at Heilbronn University. You as an DSF administrator can create or update your Allow List information. The information you provide on this portal will be transferred to us and will be used to built Allow List bundles that get distributed to the communication partners of the distributed processes.
The DSF Allow List management tool uses client certificates for authentication. You can either use a personal client certificate or the client certificate from your DSF BPE, which needs to be added to your web-browsers certificate store.
my-hospital.de
https://dsf.my-hospital.de/fhir
When you have fulfilled all the prerequisites, you can start managing your Allow Lists via the environment specific Allow List Management Tool:
We use different highlight colors for the DSF Allow List Management Tool: Green for the Test environment and blue for the Production infrastructure. To access the site, you have to authenticate yourself with a client certificate. Your web-browser will show a dialog to choose a valid certificate.
Ideas for improvement?
Have you found an error or is something unclear to you? Then please feel free to contact us on the MII-Zulip Channel or write us at gth-gecko@hs-heilbronn.de. Thank you very much!
Caution
\\nThis is an outdated version of the Allow List Management documentation. Please use the current version, even if you use an outdated DSF version.
\\nYou can read all about the concept of Allow Lists in our introduction.
"}');export{g as comp,v as data}; diff --git a/assets/allowList.html-U0Ic9Agz.js b/assets/allowList.html-U0Ic9Agz.js new file mode 100644 index 000000000..632825508 --- /dev/null +++ b/assets/allowList.html-U0Ic9Agz.js @@ -0,0 +1 @@ +import{_ as n}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as i,a as s,b as a,e as l,f as r,d as t,r as d,o as h}from"./app-BIWb5uIp.js";const c="/photos/info/allowList/allowList-architecture.png",f={};function w(m,e){const o=d("RouteLink");return h(),i("div",null,[e[2]||(e[2]=s('The main objective is to allow only authorized organizations to do what "we" allow them to do (e.g. query data).
First, we need a list of organizations that we trust. Secondly, we need a way to ensure that the other party is a member of the parent organization. Thirdly, a list of actions we want to allow the organization to perform is needed. An organization can have different roles in different use cases.
The Allow List consists of Organization-, Endpoint- and OrganisationAffiliation- resources. With these resources the allow list defines communication partners and parent organizations like research consortia and groups as well as the roles of each organization. Each DSF FHIR server stores their own allow list. To make sure that processes can be executed, parties must allow access via their allow list.
The main objective is to allow only authorized organizations to do what \\"we\\" allow them to do (e.g. query data).
\\nFirst, we need a list of organizations that we trust. Secondly, we need a way to ensure that the other party is a member of the parent organization. Thirdly, a list of actions we want to allow the organization to perform is needed. An organization can have different roles in different use cases.
The Data Sharing Framework implements a distributed business process engine based on the BPMN 2.0 and FHIR R4 standards. Every participating organisation (e.g. ORG. A) runs a FHIR endpoint accessible by other sites and a business process engine (BPE) in the local secured network. Once the DSF has been installed in an organisation, it can be used for multiple use cases.
The following architecture diagram also shows three organisations, each of which has installed the DSF. The FHIR endpoint (green) is located in a demilitarised zone (DMZ) and is accessible from outside for communication with other organisations. It can be compared to a mailbox. The Business Process Engine (BPE - blue) is located in the intranet of an organisation and is responsible for the execution of processes. The metaphor: control centre helps here.
As mentioned above, the externally accessible DSF FHIR server acts as a mailbox for communication. This means that an organisation creates a task resource in its DSF and drops the task resource (letters) into the mailbox of another organisation, requesting that something happen. Task resources have been explained in more detail in the section Basics and Standards.
It is important to understand that the DSF FHIR server is not used for persisting medical data.
The BPE located in the secure internal network executes the processes (BPMN/Java). The BPE is deployed in the internal network and has access to the local systems, such as the organisation's own FHIR server, on which medical data is stored. These FHIR servers are not to be confused with the DSF FHIR server, on which no medical data is persisted.
Different processes can be executed simultaneously. For this, only a new process plugin file has to be added and configured. More about this in the Process Plugins.
The DSF BPE uses websocket (WSS) and webservice (HTTPS) connections to communicate with the DSF FHIR server. FHIR resources are created, read, updated and deleted via HTTP requests against the FHIR webservice API. The FHIR subscription mechanism is used to communicate Task resources with status 'requested' and QuestionnaireResponse resources with status 'completed' to the BPE via websockets. When the BPE starts and before the websocket connections are established, 'requested' Task resources and 'completed' QuestionnaireResponse not seen by the BPE are read via webservice requests.
The deployment of the architecture is flexible. The organisations can be deployed as a star schema (left) or as a mesh schema (right). In the Star schema (left), all Data Integration Centres (DIC) are connected to a central node (CRR - Central Research Repository), which transfers the information to all nodes (DIC). For security reasons, a data transfer hub (DTH) is connected upstream, which provides additional security so that the medical data is never transferred together with the authenticating data.
In the mesh scheme (right), the nodes (DIC) are all directly connected to each other and the information is transferred from node to node. Here in the FDPG (Forschungsdatenportal - Research Data Portal), the data can then be accessed for research purposes. More about this here.
You can find more information about the network setup here
',20)]))}const y=t(m,[["render",g],["__file","architecture.html.vue"]]),F=JSON.parse('{"path":"/intro/info/architecture.html","title":"Architecture","lang":"en-US","frontmatter":{"title":"Architecture","icon":"structure","gitInclude":[]},"headers":[{"level":2,"title":"DSF FHIR Server 📫","slug":"dsf-fhir-server","link":"#dsf-fhir-server","children":[]},{"level":2,"title":"Business Process Engine (BPE)","slug":"business-process-engine-bpe","link":"#business-process-engine-bpe","children":[]},{"level":2,"title":"Flexible Deployment","slug":"flexible-deployment","link":"#flexible-deployment","children":[]},{"level":2,"title":"Network Setup & Additional Reverse Proxy in external DMZ","slug":"network-setup-additional-reverse-proxy-in-external-dmz","link":"#network-setup-additional-reverse-proxy-in-external-dmz","children":[]}],"readingTime":{"minutes":1.94,"words":582},"filePathRelative":"intro/info/architecture.md","excerpt":"The Data Sharing Framework implements a distributed business process engine based on the BPMN 2.0 and FHIR R4 standards. Every participating organisation (e.g. ORG. A) runs a FHIR endpoint accessible by other sites and a business process engine (BPE) in the local secured network. Once the DSF has been installed in an organisation, it can be used for multiple use cases.
"}');export{y as comp,F as data}; diff --git a/assets/authentication.html-6LTSdQiG.js b/assets/authentication.html-6LTSdQiG.js new file mode 100644 index 000000000..67a2301e9 --- /dev/null +++ b/assets/authentication.html-6LTSdQiG.js @@ -0,0 +1 @@ +import{_ as t}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as i,a as r,o as n}from"./app-BIWb5uIp.js";const a={};function s(o,e){return n(),i("div",null,e[0]||(e[0]=[r('Authentication of organizations within the DSF is handled by the use of X.509 client and server certificates. Currently the certificate authorities run by DFN-PKI Global G2, D-Trust via TMF e.V. and GÉANT TCS via DFN are supported. All participating organizations are entered in a distributed and synchronized allow-list of valid organizations and certificates.
A webserver certificate is needed to run the FHIR endpoint and a 802.1X client certificate is used to authenticate against other organizations endpoints and as a server certificate for the business process engine. For available certificate profiles see DFN-PKI-Zertifikatprofile_Global.pdf
Authentication of organizations within the DSF is handled by the use of X.509 client and server certificates. Currently the certificate authorities run by DFN-PKI Global G2, D-Trust via TMF e.V. and GÉANT TCS via DFN are supported. All participating organizations are entered in a distributed and synchronized allow-list of valid organizations and certificates.
"}');export{d as comp,f as data}; diff --git a/assets/basics.html-TQpBZp3U.js b/assets/basics.html-TQpBZp3U.js new file mode 100644 index 000000000..7911825ab --- /dev/null +++ b/assets/basics.html-TQpBZp3U.js @@ -0,0 +1 @@ +import{_ as t}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as n,a,o as s}from"./app-BIWb5uIp.js";const i="/photos/info/basics/interoperability.png",r="/photos/info/basics/bpmn1.png",o="/photos/info/basics/bpmn2.png",h={};function d(l,e){return s(),n("div",null,e[0]||(e[0]=[a('Here you can find some basic information about interoperability and the standards we are using within the DSF before we go into details about the architecture. Here we only describe how the standards (FHIR and BPMN) are used within the DSF. If you want to gain a deeper knowledge of the standards, we recommend visiting these websites: HL7 FHIR and BPMN
Interoperability is the ability of different systems to work together as seamlessly as possible. We can divide interoperability into four levels.
HL7 FHIR is a standard for data exchange that can be used as an information model for communication within and between systems. The standard is based on resources, references and profiles. With this concept, FHIR offers interoperability out of the box. The resources describe data formats. 150 specified resources cover the entire health system. An example of a resource would be a patient, which consists of data such as name or gender. These resources can refer to other resources by means of references. This connects the information units into a network. For seamless exchange of information, FHIR supports RESTful architectures and web standards such as XML or JSON, which makes it easier for developers to use FHIR.
The FHIR profiles can be understood as a set of rules. They explain, for example, which attributes must be mandatorily specified or which terminology may be used. In addition, profiles and controlled vocabulary can be validated.
Business Process Model and Notation (BPMN) is a modelling language that can be used to model and implement processes. The models can be used for the documentation of processes and for communication between different stakeholders. Furthermore, BPMN forms a standardised bridge between process design and process implementation. This is because it simplifies implementation. These processes are executed by a Business Process Engine. Basically, a BPE is a server that can read and execute the business process. More about this in the section on architecture.
On the one hand we use FHIR because of the mentioned benefits. On the other hand FHIR fits great with BPMN and these two in combination are a great fit for what we do:
Execute distributed data sharing Processes.
We do not need all 150 FHIR resources. The following FHIR resources are the ones we need and have implemented: ActivityDefinition, Binary, Bundle, CodeSystem, DocumentReference, Endpoint, Group, Library, Measure, MeasureReport, NamingSystem, Organization, Questionnaire, QuestionnaireResponse, ResearchStudy, StructureDefinition, Subscription, Task and ValueSet.
Don't worry, it is not important to understand them all now. But to understand why we use FHIR and BPMN, it is important to look at the ActivityDefinition and TaskResources on the FHIR side and the Message Events on the BPMN side.
In the following picture you can see parts of BPMN. These Message Events enable the communication between different organizations. Every time there is a Message Event between two business processes, there is a corresponding TaskResource on the FHIR side. When one organization sends a message for example “do some work” to another organization or when we send a message to ourselves to start or continue a process, we do this by creating a FHIR TaskResource with the status “requested”. After that the Business Process Engine starts the work and the status switches to “in-progress” and if the work is done to “completed” or if there is a problem to “failed”.
The ActivityDefinition is needed to publish what can be done in an instance. That means the ActivityDefinition contains the process description with the authorisation who is allowed to send a message.
',16)]))}const p=t(h,[["render",d],["__file","basics.html.vue"]]),f=JSON.parse('{"path":"/intro/info/basics.html","title":"Basics and Standards","lang":"en-US","frontmatter":{"title":"Basics and Standards","icon":"study","gitInclude":[]},"headers":[{"level":2,"title":"Interoperability","slug":"interoperability","link":"#interoperability","children":[]},{"level":2,"title":"HL7 FHIR 🔥","slug":"hl7-fhir","link":"#hl7-fhir","children":[]},{"level":2,"title":"BPMN","slug":"bpmn","link":"#bpmn","children":[]},{"level":2,"title":"Why are we using FHIR and BPMN?","slug":"why-are-we-using-fhir-and-bpmn","link":"#why-are-we-using-fhir-and-bpmn","children":[]}],"readingTime":{"minutes":2.59,"words":778},"filePathRelative":"intro/info/basics.md","excerpt":"Here you can find some basic information about interoperability and the standards were using within the DSF before we go into details about the architecture. Here we only describe how the standards (FHIR and BPMN) are used within the DSF. If you want to gain a deeper knowledge of the standards, we recommend visiting these websites: HL7 FHIR and BPMN
"}');export{p as comp,f as data}; diff --git a/assets/bpe.html-xUo025DH.js b/assets/bpe.html-xUo025DH.js new file mode 100644 index 000000000..8c138e664 --- /dev/null +++ b/assets/bpe.html-xUo025DH.js @@ -0,0 +1 @@ +import{_ as s}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as r,a as i,o as t}from"./app-BIWb5uIp.js";const o={};function l(n,e){return t(),r("div",null,e[0]||(e[0]=[i('Please also check common parameters for additional configuration options.
true
false
2
/run/secrets/db_liquibase.password
liquibase_user
jdbc:postgresql://db/bpe
camunda_users
/run/secrets/db_user_camunda.password
camunda_server_user
bpe_users
/run/secrets/db_user.password
bpe_server_user
true
.false
true
.false
true
.false
/run/secrets/app_client_certificate.pem
/run/secrets/app_client_certificate_private_key.pem
/run/secrets/app_client_certificate_private_key.pem.password
2000
60000
true
false
5000
60000
true
false
/run/secrets/app_server_trust_certificates.pem
?criteria=QuestionnaireResponse%3Fstatus%3Dcompleted&status=active&type=websocket&payload=application/fhir%2Bjson
https://foo.bar/fhir
hospital.com
-1
means infinite number of retries-1
5000
?criteria=Task%3Fstatus%3Drequested&status=active&type=websocket&payload=application/fhir%2Bjson
/run/secrets/smtp_server_client_certificate.pem
/run/secrets/smtp_server_client_certificate_private_key.pem
/run/secrets/smtp_server_client_certificate_private_key.pem.password
sender@localhost
smtp.server.de
4
/opt/bpe/log/bpe.log
465
0
reply.to@localhost
true
. Requires SMTP server to be configured.false
true
. Requires SMTP server to be configured.false
/run/secrets/smime_certificate.p12
/run/secrets/smime_certificate.p12.password
recipient@localhost
cc.recipient@localhost
/run/secrets/smtp_server_trust_certificates.pem
true
false
-1
means infinite number of retries-1
5000
process
foo.bar, test.com:8080
http://proxy.foo:8080
Please also check common parameters for additional configuration options.
\\ntrue
false
Prerequisite: Java 11, Maven 3.6, Docker
mvn install
Prerequisite: Java 11, Maven 3.6, PostgreSQL 11
mvn install
CREATE USER liquibase_user WITH LOGIN NOSUPERUSER INHERIT CREATEDB CREATEROLE NOREPLICATION
+PASSWORD 'fLp6ZSd5QrMAkGZMjxqXjmcWrTfa3Dn8fA57h92Y';
CREATE DATABASE bpe OWNER liquibase_user;
+CREATE DATABASE fhir OWNER liquibase_user;
Prerequisite: Java 11, Maven 3.6, Docker 18
mvn install
docker-build.bat
docker-build.sh
dev:
+docker-compose up
+
+prod:
+docker-compose -f docker-compose.yml -f docker-compose.prod.yml up
dev:
+docker-compose up
+
+prod:
+docker-compose -f docker-compose.yml -f docker-compose.prod.yml up
docker network ls -q | xargs docker network rm
docker-compose down -v
Each process contains an ExampleStarter which creates FHIR resources and sends them to a designated FHIR-Endpoint in order to start the corresponding process in the Manual Integration Test Setup. The same client certificate can be used as above: .../highmed-dsf/dsf-tools/dsf-tools-test-data-generator/cert/Webbrowser_Test_User/Webbrowser_Test_User_certificate.p12 (Password: password).
The following configuration is needed:
DSF_CLIENT_CERTIFICATE_PATH
or args[0]
has to be setDSF_CLIENT_CERTIFICATE_PASSWORD
or args[1]
has to be setPrerequisite: Java 11, Maven 3.6, Docker
\\nmvn install
\\n
DEPRECATED SINCE VERSION 0.4.0
A service task of a process integrated in the framework can be overwritten using the plugin interface. This means that entire processes do not have to be replaced when only individual steps need adaption. An example can be found in the module dsf-bpe > dsf-bpe-process-plugin-example > dsf-bpe-process-service-overwrite
.
A plugin has as its only dependency the process module which contains the service task to be overwritten.
For a plugin replacing one service task, two new files must be generated:
doExecute(DelegateExecution execution)
.@Primary
.\\n\\n\\n
DEPRECATED SINCE VERSION 0.4.0
A service task of a process integrated in the framework can be overwritten using the plugin interface. This means that entire processes do not have to be replaced when only individual steps need adaption. An example can be found in the module dsf-bpe > dsf-bpe-process-plugin-example > dsf-bpe-process-service-overwrite
.
Your code can make a difference for the Data Sharing Framework (DSF). We invite all users to share their code, tests, scripts and improvement ideas. Contributions of any size enhance the DSF and support the data sharing framework community.
Start now by visiting our contribution pages. Every line of code helps us build a stronger and more versatile DSF.
You can import our code style for your specific IDE:
Java
, Code style
, Formatter
, Import
and select the downloaded file.Editor
, Code style
, Java
, the settings icon, import scheme
, IntelliJ
and select the downloaded file.Pull requests will only be approved if the code is formatted according to the code style configurations above. To format the code with maven before pushing to GitHub, use mvn compile -Pformat-and-sort
.
Git Flow is used as this project's branching strategy. Therefore, you will find the following structure:
Notice that only the first two elements listed are actual branches. The other elements are containers to hold all branches belonging to that category.
The following ruleset is applied to name branches:
issue/<issue-number>_<issue-name>
hotfix/<version>
release/<version>
This chapter lists all important requirements to get the project buildable and running properly.
This project uses Java JDK 17, so make sure you have it installed on your system.
Docker is used in this project to test database functionality and to run more complex test-setups.
The project relies on Maven as its management tool.
Important: When building the project you might encounter the following error:
Could not determine gpg version GPG is used to sign artifacts for public release. Since this does not concern contributors, you may skip this step in the maven build process with -Dgpg.skip
.
We follow Martin Fowler's method for managing pull requests. This approach categorizes pull requests based on the level of trust and experience of the contributor, as well as the impact of the changes. Here's how we apply it:
Ship: For our most trusted contributors with a proven track record. These members can merge their pull requests without prior review, typically for minor or highly confident changes.
Show: This level is for trusted contributors who need some oversight, as well as for experienced developers who want to demonstrate how certain changes should be made in the future. They create pull requests and show their work to the team.
Ask: New or less experienced contributors, as well as those submitting more complex changes, fall into this category. They are required to ask for feedback and approval before their changes can be merged, ensuring thorough review and quality control.
This method helps us maintain a balance between code quality and efficient development, recognizing the varying levels of expertise among our contributors.
For more information on Fowler's approach, visit Martin Fowler's article on Pull Requests.
The DSF (Data Sharing Framework) and its process plugins are frequently used to transmit sensitive personal data. To prevent the release of personal data during development, please adhere to the following guidelines:
Your code can make a difference for the Data Sharing Framework (DSF). We invite all users to share their code, tests, scripts and improvement ideas. Contributions of any size enhance the DSF and support the data sharing framework community.
\\nYour code can make a difference for the Data Sharing Framework (DSF). We invite all users to share their code, tests, scripts and improvement ideas. Contributions of any size enhance the DSF and support the data sharing framework community.
Start now by visiting our contribution pages. Every line of code helps us build a stronger and more versatile DSF.
You can import our code style for your specific IDE:
Java
, Code style
, Formatter
, Import
and select the downloaded file.Editor
, Code style
, Java
, the settings icon, import scheme
, IntelliJ
and select the downloaded file.Pull requests will only be approved if the code is formatted according to the code style configurations above. To format the code with maven before pushing to GitHub, use mvn compile -Pformat-and-sort
.
Git Flow is used as this project's branching strategy. Therefore, you will find the following structure:
Notice that only the first two elements listed are actual branches. The other elements are containers to hold all branches belonging to that category.
The following ruleset is applied to name branches:
issue/<issue-number>_<issue-name>
hotfix/<version>
release/<version>
This chapter lists all important requirements to get the project buildable and running properly.
This project uses Java JDK 17, so make sure you have it installed on your system.
Docker is used in this project to test database functionality and to run more complex test-setups.
The project relies on Maven as its management tool.
Important: When building the project you might encounter the following error:
Could not determine gpg version GPG is used to sign artifacts for public release. Since this does not concern contributors, you may skip this step in the maven build process with -Dgpg.skip
.
We follow Martin Fowler's method for managing pull requests. This approach categorizes pull requests based on the level of trust and experience of the contributor, as well as the impact of the changes. Here's how we apply it:
Ship: For our most trusted contributors with a proven track record. These members can merge their pull requests without prior review, typically for minor or highly confident changes.
Show: This level is for trusted contributors who need some oversight, as well as for experienced developers who want to demonstrate how certain changes should be made in the future. They create pull requests and show their work to the team.
Ask: New or less experienced contributors, as well as those submitting more complex changes, fall into this category. They are required to ask for feedback and approval before their changes can be merged, ensuring thorough review and quality control.
This method helps us maintain a balance between code quality and efficient development, recognizing the varying levels of expertise among our contributors.
For more information on Fowler's approach, visit Martin Fowler's article on Pull Requests.
The DSF (Data Sharing Framework) and its process plugins are frequently used to transmit sensitive personal data. To prevent the release of personal data during development, please adhere to the following guidelines:
Your code can make a difference for the Data Sharing Framework (DSF). We invite all users to share their code, tests, scripts and improvement ideas. Contributions of any size enhance the DSF and support the data sharing framework community.
\\nYour code can make a difference for the Data Sharing Framework (DSF). We invite all users to share their code, tests, scripts and improvement ideas. Contributions of any size enhance the DSF and support the data sharing framework community.
Start now by visiting our contribution pages. Every line of code helps us build a stronger and more versatile DSF.
You can import our code style for your specific IDE:
Java
, Code style
, Formatter
, Import
and select the downloaded file.Editor
, Code style
, Java
, the settings icon, import scheme
, IntelliJ
and select the downloaded file.Pull requests will only be approved if the code is formatted according to the code style configurations above. To format the code with maven before pushing to GitHub, use mvn compile -Pformat-and-sort
.
Git Flow is used as this project's branching strategy. Therefore, you will find the following structure:
Notice that only the first two elements listed are actual branches. The other elements are containers to hold all branches belonging to that category.
The following ruleset is applied to name branches:
issue/<issue-number>_<issue-name>
hotfix/<version>
release/<version>
This chapter lists all important requirements to get the project buildable and running properly.
This project uses Java JDK 17, so make sure you have it installed on your system.
Docker is used in this project to test database functionality and to run more complex test-setups.
The project relies on Maven as its management tool.
Important: When building the project you might encounter the following error:
Could not determine gpg version GPG is used to sign artifacts for public release. Since this does not concern contributors, you may skip this step in the maven build process with -Dgpg.skip
.
We follow Martin Fowler's method for managing pull requests. This approach categorizes pull requests based on the level of trust and experience of the contributor, as well as the impact of the changes. Here's how we apply it:
Ship: For our most trusted contributors with a proven track record. These members can merge their pull requests without prior review, typically for minor or highly confident changes.
Show: This level is for trusted contributors who need some oversight, as well as for experienced developers who want to demonstrate how certain changes should be made in the future. They create pull requests and show their work to the team.
Ask: New or less experienced contributors, as well as those submitting more complex changes, fall into this category. They are required to ask for feedback and approval before their changes can be merged, ensuring thorough review and quality control.
This method helps us maintain a balance between code quality and efficient development, recognizing the varying levels of expertise among our contributors.
For more information on Fowler's approach, visit Martin Fowler's article on Pull Requests.
The DSF (Data Sharing Framework) and its process plugins are frequently used to transmit sensitive personal data. To prevent the release of personal data during development, please adhere to the following guidelines:
Your code can make a difference for the Data Sharing Framework (DSF). We invite all users to share their code, tests, scripts and improvement ideas. Contributions of any size enhance the DSF and support the data sharing framework community.
\\nYour code can make a difference for the Data Sharing Framework (DSF). We invite all users to share their code, tests, scripts and improvement ideas. Contributions of any size enhance the DSF and support the data sharing framework community.
Start now by visiting our contribution pages. Every line of code helps us build a stronger and more versatile DSF.
You can import our code style for your specific IDE:
Java
, Code style
, Formatter
, Import
and select the downloaded file.Editor
, Code style
, Java
, the settings icon, import scheme
, IntelliJ
and select the downloaded file.Pull requests will only be approved if the code is formatted according to the code style configurations above. To format the code with maven before pushing to GitHub, use mvn compile -Pformat-and-sort
.
Git Flow is used as this project's branching strategy. Therefore, you will find the following structure:
Notice that only the first two elements listed are actual branches. The other elements are containers to hold all branches belonging to that category.
The following ruleset is applied to name branches:
issue/<issue-number>_<issue-name>
hotfix/<version>
release/<version>
This chapter lists all important requirements to get the project buildable and running properly.
This project uses Java JDK 17, so make sure you have it installed on your system.
Docker is used in this project to test database functionality and to run more complex test-setups.
The project relies on Maven as its management tool.
Important: When building the project you might encounter the following error:
Could not determine gpg version GPG is used to sign artifacts for public release. Since this does not concern contributors, you may skip this step in the maven build process with -Dgpg.skip
.
We follow Martin Fowler's method for managing pull requests. This approach categorizes pull requests based on the level of trust and experience of the contributor, as well as the impact of the changes. Here's how we apply it:
Ship: For our most trusted contributors with a proven track record. These members can merge their pull requests without prior review, typically for minor or highly confident changes.
Show: This level is for trusted contributors who need some oversight, as well as for experienced developers who want to demonstrate how certain changes should be made in the future. They create pull requests and show their work to the team.
Ask: New or less experienced contributors, as well as those submitting more complex changes, fall into this category. They are required to ask for feedback and approval before their changes can be merged, ensuring thorough review and quality control.
This method helps us maintain a balance between code quality and efficient development, recognizing the varying levels of expertise among our contributors.
For more information on Fowler's approach, visit Martin Fowler's article on Pull Requests.
The DSF (Data Sharing Framework) and its process plugins are frequently used to transmit sensitive personal data. To prevent the release of personal data during development, please adhere to the following guidelines:
Your code can make a difference for the Data Sharing Framework (DSF). We invite all users to share their code, tests, scripts and improvement ideas. Contributions of any size enhance the DSF and support the data sharing framework community.
\nPull Requests are only approved if the code is formatted according to the code-style configurations above. To format the code with maven before pushing to GitHub, use mvn compile -Pformat-and-sort
.
Since Release 0.1.0, we follow git-flow
as described here.
New features should branch from develop
and merged back when done. Hot-Fixes for the latest release will branch off master
and will be merged into develop and later into master. A new release will branch off develop for a ramp-down phase and will then be merged into master. The new master should merge back into develop to start a new development cycle.
issue/<issue-number>_<issue-name>
hot-fix/<issue-number>_<issue-name>
release/<version>
HiGHmed DSF code-style configurations for Eclipse and IntelliJ IDEA can be found here:
\nPull Requests are only approved if the code is formatted according to the code-style configurations above. To format the code with maven before pushing to GitHub, use mvn compile -Pformat-and-sort
.
Your code can make a difference for the Data Sharing Framework (DSF). We invite all users to share their code, tests, scripts and improvement ideas. Contributions of any size enhance the DSF and support the data sharing framework community.
Start now by visiting our contribution pages. Every line of code helps us build a stronger and more versatile DSF.
You can import our code style for your specific IDE:
Java
, Code style
, Formatter
, Import
and select the downloaded file.Editor
, Code style
, Java
, the settings icon, import scheme
, IntelliJ
and select the downloaded file.Pull requests will only be approved if the code is formatted according to the code style configurations above. To format the code with maven before pushing to GitHub, use mvn compile -Pformat-and-sort
.
Git Flow is used as this project's branching strategy. Therefore, you will find the following structure:
Notice that only the first two elements listed are actual branches. The other elements are containers to hold all branches belonging to that category.
The following ruleset is applied to name branches:
issue/<issue-number>_<issue-name>
hotfix/<version>
release/<version>
This chapter lists all important requirements to get the project buildable and running properly.
This project uses Java JDK 17, so make sure you have it installed on your system.
Docker is used in this project to test database functionality and to run more complex test-setups.
The project relies on Maven as its management tool.
Important: When building the project you might encounter the following error:
Could not determine gpg version GPG is used to sign artifacts for public release. Since this does not concern contributors, you may skip this step in the maven build process with -Dgpg.skip
.
We follow Martin Fowler's method for managing pull requests. This approach categorizes pull requests based on the level of trust and experience of the contributor, as well as the impact of the changes. Here's how we apply it:
Ship: For our most trusted contributors with a proven track record. These members can merge their pull requests without prior review, typically for minor or highly confident changes.
Show: This level is for trusted contributors who need some oversight, as well as for experienced developers who want to demonstrate how certain changes should be made in the future. They create pull requests and show their work to the team.
Ask: New or less experienced contributors, as well as those submitting more complex changes, fall into this category. They are required to ask for feedback and approval before their changes can be merged, ensuring thorough review and quality control.
This method helps us maintain a balance between code quality and efficient development, recognizing the varying levels of expertise among our contributors.
For more information on Fowler's approach, visit Martin Fowler's article on Pull Requests.
The DSF (Data Sharing Framework) and its process plugins are frequently used to transmit sensitive personal data. To prevent the release of personal data during development, please adhere to the following guidelines:
Your code can make a difference for the Data Sharing Framework (DSF). We invite all users to share their code, tests, scripts and improvement ideas. Contributions of any size enhance the DSF and support the data sharing framework community.
\\nYour code can make a difference for the Data Sharing Framework (DSF). We invite all users to share their code, tests, scripts and improvement ideas. Contributions of any size enhance the DSF and support the data sharing framework community.
Start now by visiting our contribution pages. Every line of code helps us build a stronger and more versatile DSF.
You can import our code style for your specific IDE:
Java
, Code style
, Formatter
, Import
and select the downloaded file.Editor
, Code style
, Java
, the settings icon, import scheme
, IntelliJ
and select the downloaded file.Pull requests will only be approved if the code is formatted according to the code style configurations above. To format the code with maven before pushing to GitHub, use mvn compile -Pformat-and-sort
.
Git Flow is used as this project's branching strategy. Therefore, you will find the following structure:
Notice that only the first two elements listed are actual branches. The other elements are containers to hold all branches belonging to that category.
The following ruleset is applied to name branches:
issue/<issue-number>_<issue-name>
hotfix/<version>
release/<version>
This chapter lists all important requirements to get the project buildable and running properly.
This project uses Java JDK 17, so make sure you have it installed on your system.
Docker is used in this project to test database functionality and to run more complex test-setups.
The project relies on Maven as its management tool.
Important: When building the project you might encounter the following error:
Could not determine gpg version GPG is used to sign artifacts for public release. Since this does not concern contributors, you may skip this step in the maven build process with -Dgpg.skip
.
We follow Martin Fowler's method for managing pull requests. This approach categorizes pull requests based on the level of trust and experience of the contributor, as well as the impact of the changes. Here's how we apply it:
Ship: For our most trusted contributors with a proven track record. These members can merge their pull requests without prior review, typically for minor or highly confident changes.
Show: This level is for trusted contributors who need some oversight, as well as for experienced developers who want to demonstrate how certain changes should be made in the future. They create pull requests and show their work to the team.
Ask: New or less experienced contributors, as well as those submitting more complex changes, fall into this category. They are required to ask for feedback and approval before their changes can be merged, ensuring thorough review and quality control.
This method helps us maintain a balance between code quality and efficient development, recognizing the varying levels of expertise among our contributors.
For more information on Fowler's approach, visit Martin Fowler's article on Pull Requests.
The DSF (Data Sharing Framework) and its process plugins are frequently used to transmit sensitive personal data. To prevent the release of personal data during development, please adhere to the following guidelines:
Your code can make a difference for the Data Sharing Framework (DSF). We invite all users to share their code, tests, scripts and improvement ideas. Contributions of any size enhance the DSF and support the data sharing framework community.
\\nYour code can make a difference for the Data Sharing Framework (DSF). We invite all users to share their code, tests, scripts and improvement ideas. Contributions of any size enhance the DSF and support the data sharing framework community.
Start now by visiting our contribution pages. Every line of code helps us build a stronger and more versatile DSF.
You can import our code style for your specific IDE:
Java
, Code style
, Formatter
, Import
and select the downloaded file.Editor
, Code style
, Java
, the settings icon, import scheme
, IntelliJ
and select the downloaded file.Pull requests will only be approved if the code is formatted according to the code style configurations above. To format the code with maven before pushing to GitHub, use mvn compile -Pformat-and-sort
.
Git Flow is used as this project's branching strategy. Therefore, you will find the following structure:
Notice that only the first two elements listed are actual branches. The other elements are containers to hold all branches belonging to that category.
The following ruleset is applied to name branches:
issue/<issue-number>_<issue-name>
hotfix/<version>
release/<version>
This chapter lists all important requirements to get the project buildable and running properly.
This project uses Java JDK 17, so make sure you have it installed on your system.
Docker is used in this project to test database functionality and to run more complex test-setups.
The project relies on Maven as its management tool.
Important: When building the project you might encounter the following error:
Could not determine gpg version GPG is used to sign artifacts for public release. Since this does not concern contributors, you may skip this step in the maven build process with -Dgpg.skip
.
We follow Martin Fowler's method for managing pull requests. This approach categorizes pull requests based on the level of trust and experience of the contributor, as well as the impact of the changes. Here's how we apply it:
Ship: For our most trusted contributors with a proven track record. These members can merge their pull requests without prior review, typically for minor or highly confident changes.
Show: This level is for trusted contributors who need some oversight, as well as for experienced developers who want to demonstrate how certain changes should be made in the future. They create pull requests and show their work to the team.
Ask: New or less experienced contributors, as well as those submitting more complex changes, fall into this category. They are required to ask for feedback and approval before their changes can be merged, ensuring thorough review and quality control.
This method helps us maintain a balance between code quality and efficient development, recognizing the varying levels of expertise among our contributors.
For more information on Fowler's approach, visit Martin Fowler's article on Pull Requests.
The DSF (Data Sharing Framework) and its process plugins are frequently used to transmit sensitive personal data. To prevent the release of personal data during development, please adhere to the following guidelines:
Your code can make a difference for the Data Sharing Framework (DSF). We invite all users to share their code, tests, scripts and improvement ideas. Contributions of any size enhance the DSF and support the data sharing framework community.
\\n0.0.0.0
127.0.0.1
8080
X-ClientCert
true
to enable OIDC authorization code flowfalse
true
to enable OIDC back-channel logouttrue
(enabled), DEV_DSF_SERVER_AUTH_OIDC_CLIENT_ID and DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT_PATH to be specifiedfalse
/back-channel-logout
true
to enable OIDC bearer token authenticationfalse
/run/secrets/oidc_provider_client_certificate.pem
/run/secrets/oidc_provider_client_certificate_private_key.pem
/run/secrets/oidc_provider_client_certificate_private_key.pem.password
5000
30000
/run/secrets/oidc_provider_trust_certificates.pem
https://keycloak.test.com:8443/realms/example-realm-name
/run/secrets/app_client_trust_certificates.pem
bpe
docker image: /bpe
, default in fhir
docker image: /fhir
127.0.0.1
10000
0.0.0.0
127.0.0.1
Please note: Additional parameters (not listed here) are used to configure process plugins.
org.highmed.consent.client.stub.ConsentClientStubFactory
/run/secrets/db_liquibase.password
liquibase_user
jdbc:postgresql://db/bpe
camunda_users
/run/secrets/db_user_camunda.password
camunda_server_user
bpe_users
/run/secrets/db_user.password
bpe_server_user
true
.false
true
.false
true
.false
/run/secrets/app_client_certificate.pem
/run/secrets/app_client_certificate_private_key.pem
/run/secrets/app_client_certificate_private_key.pem.password
http://proxy.foo:8080
2000
60000
true
false
http://proxy.foo:8080
http://proxy.foo:8080
5000
60000
true
false
/run/secrets/app_client_trust_certificates.pem
?criteria=QuestionnaireResponse%3Fstatus%3Dcompleted&status=active&type=websocket&payload=application/fhir%2Bjson
https://foo.bar/fhir
hospital.com
-1
means infinite number of retries-1
5000
?criteria=Task%3Fstatus%3Drequested&status=active&type=websocket&payload=application/fhir%2Bjson
/run/secrets/smtp_server_client_certificate.pem
/run/secrets/smtp_server_client_certificate_private_key.pem
/run/secrets/smtp_server_client_certificate_private_key.pem.password
sender@localhost
smtp.server.de
4
/opt/bpe/log/bpe.log
465
0
reply.to@localhost
true
. Requires SMTP server to be configured.false
true
. Requires SMTP server to be configured.false
/run/secrets/smime_certificate.p12
/run/secrets/smime_certificate.p12.password
recipient@localhost
cc.recipient@localhost
/run/secrets/smtp_server_trust_certificates.pem
true
false
org.highmed.mpi.client.stub.MasterPatientIndexClientStubFactory
org.highmed.openehr.client.stub.OpenEhrClientStubFactory
-1
means infinite number of retries-1
5000
process
org.highmed.pseudonymization.client.stub.PseudonymizationClientStubFactory
Please note: Additional parameters (not listed here) are used to configure process plugins.
\\norg.highmed.consent.client.stub.ConsentClientStubFactory
Please note: Additional parameters (not listed here) are used to define elements of the external FHIR bundle.
/run/secrets/app_client_certificate.pem
/run/secrets/app_client_certificate_private_key.pem
/run/secrets/app_client_certificate_private_key.pem.password
http://proxy.foo:8080
2000
10000
/run/secrets/app_client_trust_certificates.pem
true
false
/run/secrets/db_liquibase.password
liquibase_user
jdbc:postgresql://db/fhir
fhir_users
/run/secrets/db_user.password
fhir_permanent_delete_users
/run/secrets/db_user_permanent_delete.password
fhir_server_permanent_delete_user
fhir_server_user
https://foo.bar/fhir
conf/bundle.xml
hospital.com
20
Please note: Additional parameters (not listed here) are used to define elements of the external FHIR bundle.
\\n/run/secrets/app_client_certificate.pem
app
, 172.28.1.3
443
my-external.fqdn:443
30
seconds30
seconds60
seconds60
secondsSSLCACertificateFile
/run/secrets/ssl_ca_certificate_file.pem
Acceptable client certificate CA names
sent to the client during the TLS handshake, sets the apache httpd parameter SSLCADNRequestFile
; if omitted all entries from SSL_CA_CERTIFICATE_FILE
are used/run/secrets/ssl_ca_dn_request_file.pem
SSLCertificateChainFile
; can be omitted if either no chain is needed (aka self signed server certificate) or the file specified via SSL_CERTIFICATE_FILE
contains the certificate chain/run/secrets/ssl_certificate_chain_file.pem
SSLCertificateFile
/run/secrets/ssl_certificate_file.pem
SSLCertificateKeyFile
/run/secrets/ssl_certificate_key_file.pem
app
, 172.28.1.3
0.0.0.0
127.0.0.1
8080
X-ClientCert
true
to enable OIDC authorization code flowfalse
true
to enable OIDC back-channel logouttrue
(enabled), DEV_DSF_SERVER_AUTH_OIDC_CLIENT_ID and DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT_PATH to be specifiedfalse
/back-channel-logout
true
to enable OIDC bearer token authenticationfalse
/run/secrets/oidc_provider_client_certificate.pem
/run/secrets/oidc_provider_client_certificate_private_key.pem
/run/secrets/oidc_provider_client_certificate_private_key.pem.password
5000
30000
/run/secrets/oidc_provider_trust_certificates.pem
https://keycloak.test.com:8443/realms/example-realm-name
/run/secrets/app_client_trust_certificates.pem
bpe
docker image: /bpe
, default in fhir
docker image: /fhir
127.0.0.1
10000
true
false
2
/run/secrets/db_liquibase.password
liquibase_user
jdbc:postgresql://db/bpe
camunda_users
/run/secrets/db_user_camunda.password
camunda_server_user
bpe_users
/run/secrets/db_user.password
bpe_server_user
true
.false
true
.false
true
.false
/run/secrets/app_client_certificate.pem
/run/secrets/app_client_certificate_private_key.pem
/run/secrets/app_client_certificate_private_key.pem.password
2000
60000
true
false
5000
60000
true
false
/run/secrets/app_server_trust_certificates.pem
?criteria=QuestionnaireResponse%3Fstatus%3Dcompleted&status=active&type=websocket&payload=application/fhir%2Bjson
https://foo.bar/fhir
hospital.com
-1
means infinite number of retries-1
5000
?criteria=Task%3Fstatus%3Drequested&status=active&type=websocket&payload=application/fhir%2Bjson
/run/secrets/smtp_server_client_certificate.pem
/run/secrets/smtp_server_client_certificate_private_key.pem
/run/secrets/smtp_server_client_certificate_private_key.pem.password
sender@localhost
smtp.server.de
4
/opt/bpe/log/bpe.log
465
0
reply.to@localhost
true
. Requires SMTP server to be configured.false
true
. Requires SMTP server to be configured.false
/run/secrets/smime_certificate.p12
/run/secrets/smime_certificate.p12.password
recipient@localhost
cc.recipient@localhost
/run/secrets/smtp_server_trust_certificates.pem
true
false
-1
means infinite number of retries-1
5000
process
foo.bar, test.com:8080
http://proxy.foo:8080
0.0.0.0
127.0.0.1
0.0.0.0
127.0.0.1
8080
X-ClientCert
true
to enable OIDC authorization code flowfalse
true
to enable OIDC back-channel logouttrue
(enabled), DEV_DSF_SERVER_AUTH_OIDC_CLIENT_ID and DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT_PATH to be specifiedfalse
/back-channel-logout
true
to enable OIDC bearer token authenticationfalse
/run/secrets/oidc_provider_client_certificate.pem
/run/secrets/oidc_provider_client_certificate_private_key.pem
/run/secrets/oidc_provider_client_certificate_private_key.pem.password
5000
30000
/run/secrets/oidc_provider_trust_certificates.pem
https://keycloak.test.com:8443/realms/example-realm-name
/run/secrets/app_client_trust_certificates.pem
bpe
docker image: /bpe
, default in fhir
docker image: /fhir
127.0.0.1
10000
true
false
2
/run/secrets/db_liquibase.password
liquibase_user
jdbc:postgresql://db/bpe
camunda_users
/run/secrets/db_user_camunda.password
camunda_server_user
bpe_users
/run/secrets/db_user.password
bpe_server_user
true
.false
true
.false
true
.false
true
.false
/run/secrets/app_client_certificate.pem
/run/secrets/app_client_certificate_private_key.pem
/run/secrets/app_client_certificate_private_key.pem.password
2000
60000
true
false
5000
60000
true
false
/run/secrets/app_client_trust_certificates.pem
?criteria=QuestionnaireResponse%3Fstatus%3Dcompleted&status=active&type=websocket&payload=application/fhir%2Bjson
https://foo.bar/fhir
-1
means infinite number of retries-1
5000
?criteria=Task%3Fstatus%3Drequested&status=active&type=websocket&payload=application/fhir%2Bjson
/run/secrets/smtp_server_client_certificate.pem
/run/secrets/smtp_server_client_certificate_private_key.pem
/run/secrets/smtp_server_client_certificate_private_key.pem.password
sender@localhost
smtp.server.de
4
/opt/bpe/log/bpe.log
465
0
reply.to@localhost
true
. Requires SMTP server to be configured.false
true
. Requires SMTP server to be configured.false
/run/secrets/smime_certificate.p12
/run/secrets/smime_certificate.p12.password
recipient@localhost
cc.recipient@localhost
/run/secrets/smtp_server_trust_certificates.pem
true
false
4
10
40
dsfdev_updateAllowList|1.0, another_process|x.y
-1
means infinite number of retries-1
5000
process
old_process|x.y
<= 0
means number of cpu cores-1
foo.bar, test.com:8080
http://proxy.foo:8080
0.0.0.0
127.0.0.1
app
, 172.28.1.3
443
my-external.fqdn:443
30
seconds30
seconds60
seconds60
seconds/
character at start, no /
character at end, use ''
(empty string) to configure root as context path./bpe
SSLCACertificateFile
/run/secrets/ssl_ca_certificate_file.pem
Acceptable client certificate CA names
sent to the client during the TLS handshake, sets the apache httpd parameter SSLCADNRequestFile
; if omitted all entries from SSL_CA_CERTIFICATE_FILE
are used/run/secrets/ssl_ca_dn_request_file.pem
SSLCertificateChainFile
; can be omitted if either no chain is needed (aka self signed server certificate) or the file specified via SSL_CERTIFICATE_FILE
contains the certificate chain/run/secrets/ssl_certificate_chain_file.pem
SSLCertificateFile
/run/secrets/ssl_certificate_file.pem
SSLCertificateKeyFile
/run/secrets/ssl_certificate_key_file.pem
SSLVerifyClient
optional
when using OIDC authenticationrequire
app
, 172.28.1.3
app
, 172.28.1.3
443
my-external.fqdn:443
30
seconds30
seconds60
seconds60
seconds/
character at start, no /
character at end, use ''
(empty string) to configure root as context path./bpe
SSLCACertificateFile
/run/secrets/ssl_ca_certificate_file.pem
Acceptable client certificate CA names
sent to the client during the TLS handshake, sets the apache httpd parameter SSLCADNRequestFile
; if omitted all entries from SSL_CA_CERTIFICATE_FILE
are used/run/secrets/ssl_ca_dn_request_file.pem
SSLCertificateChainFile
; can be omitted if either no chain is needed (aka self signed server certificate) or the file specified via SSL_CERTIFICATE_FILE
contains the certificate chain/run/secrets/ssl_certificate_chain_file.pem
SSLCertificateFile
/run/secrets/ssl_certificate_file.pem
SSLCertificateKeyFile
/run/secrets/ssl_certificate_key_file.pem
SSLVerifyClient
optional
when using OIDC authenticationrequire
app
, 172.28.1.3
/run/secrets/app_client_certificate.pem
/run/secrets/app_client_certificate_private_key.pem
/run/secrets/app_client_certificate_private_key.pem.password
2000
10000
/run/secrets/app_server_trust_certificates.pem
true
false
true
false
2
/run/secrets/db_liquibase.password
liquibase_user
jdbc:postgresql://db/fhir
fhir_users
/run/secrets/db_user.password
fhir_permanent_delete_users
/run/secrets/db_user_permanent_delete.password
fhir_server_permanent_delete_user
fhir_server_user
https://foo.bar/fhir
conf/bundle.xml
hospital.com
certtool --fingerprint --hash=sha512 --infile=client_certificate.pem
20
false
false
for developmenttrue
foo.bar, test.com:8080
http://proxy.foo:8080
0.0.0.0
127.0.0.1
8080
X-ClientCert
true
to enable OIDC authorization code flowfalse
true
to enable OIDC back-channel logouttrue
(enabled), DEV_DSF_SERVER_AUTH_OIDC_CLIENT_ID and DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT_PATH to be specifiedfalse
/back-channel-logout
true
to enable OIDC bearer token authenticationfalse
/run/secrets/oidc_provider_client_certificate.pem
/run/secrets/oidc_provider_client_certificate_private_key.pem
/run/secrets/oidc_provider_client_certificate_private_key.pem.password
5000
30000
/run/secrets/oidc_provider_trust_certificates.pem
https://keycloak.test.com:8443/realms/example-realm-name
/run/secrets/app_client_trust_certificates.pem
bpe
docker image: /bpe
, default in fhir
docker image: /fhir
127.0.0.1
10000
/run/secrets/app_client_certificate.pem
0.0.0.0
127.0.0.1
8080
X-ClientCert
true
to enable OIDC authorization code flowfalse
true
to enable OIDC back-channel logouttrue
(enabled), DEV_DSF_SERVER_AUTH_OIDC_CLIENT_ID and DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT_PATH to be specifiedfalse
/back-channel-logout
true
to enable OIDC bearer token authenticationfalse
/run/secrets/oidc_provider_client_certificate.pem
/run/secrets/oidc_provider_client_certificate_private_key.pem
/run/secrets/oidc_provider_client_certificate_private_key.pem.password
5000
30000
/run/secrets/oidc_provider_trust_certificates.pem
https://keycloak.test.com:8443/realms/example-realm-name
/run/secrets/app_client_trust_certificates.pem
bpe
docker image: /bpe
, default in fhir
docker image: /fhir
127.0.0.1
10000
true
false
2
/run/secrets/db_liquibase.password
liquibase_user
jdbc:postgresql://db/bpe
camunda_users
/run/secrets/db_user_camunda.password
camunda_server_user
bpe_users
/run/secrets/db_user.password
bpe_server_user
true
.false
true
.false
true
.false
true
.false
true
.false
true
.false
true
.false
/run/secrets/app_client_certificate.pem
/run/secrets/app_client_certificate_private_key.pem
/run/secrets/app_client_certificate_private_key.pem.password
2000
60000
true
false
5000
60000
true
false
/run/secrets/app_client_trust_certificates.pem
?criteria=QuestionnaireResponse%3Fstatus%3Dcompleted&status=active&type=websocket&payload=application/fhir%2Bjson
https://foo.bar/fhir
-1
means infinite number of retries-1
5000
?criteria=Task%3Fstatus%3Drequested&status=active&type=websocket&payload=application/fhir%2Bjson
/run/secrets/smtp_server_client_certificate.pem
/run/secrets/smtp_server_client_certificate_private_key.pem
/run/secrets/smtp_server_client_certificate_private_key.pem.password
sender@localhost
smtp.server.de
4
/opt/bpe/log/bpe.log
465
0
reply.to@localhost
true
. Requires SMTP server to be configured.false
true
. Requires SMTP server to be configured.false
/run/secrets/smime_certificate.p12
/run/secrets/smime_certificate.p12.password
recipient@localhost
cc.recipient@localhost
/run/secrets/smtp_server_trust_certificates.pem
true
false
4
10
40
dsfdev_updateAllowList|1.0, another_process|x.y
-1
means infinite number of retries-1
5000
process
old_process|x.y
<= 0
means number of cpu cores-1
https://foo.bar/bpe
https://localhost/bpe
false
false
for developmenttrue
dev
, test
and prod
environments if configured; supported values: dev
, test
and prod
foo.bar, test.com:8080
http://proxy.foo:8080
0.0.0.0
127.0.0.1
/run/secrets/app_client_certificate.pem
/run/secrets/app_client_certificate_private_key.pem
/run/secrets/app_client_certificate_private_key.pem.password
2000
10000
/run/secrets/app_client_trust_certificates.pem
true
false
true
false
2
/run/secrets/db_liquibase.password
liquibase_user
jdbc:postgresql://db/fhir
fhir_users
/run/secrets/db_user.password
fhir_permanent_delete_users
/run/secrets/db_user_permanent_delete.password
fhir_server_permanent_delete_user
fhir_server_user
https://foo.bar/fhir
conf/bundle.xml
hospital.com
certtool --fingerprint --hash=sha512 --infile=client_certificate.pem
20
false
false
for developmenttrue
foo.bar, test.com:8080
http://proxy.foo:8080
0.0.0.0
127.0.0.1
8080
X-ClientCert
true
to enable OIDC authorization code flowfalse
true
to enable OIDC back-channel logouttrue
(enabled), DEV_DSF_SERVER_AUTH_OIDC_CLIENT_ID and DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT_PATH to be specifiedfalse
/back-channel-logout
true
to enable OIDC bearer token authenticationfalse
/run/secrets/oidc_provider_client_certificate.pem
/run/secrets/oidc_provider_client_certificate_private_key.pem
/run/secrets/oidc_provider_client_certificate_private_key.pem.password
5000
30000
/run/secrets/oidc_provider_trust_certificates.pem
https://keycloak.test.com:8443/realms/example-realm-name
/run/secrets/app_client_trust_certificates.pem
bpe
docker image: /bpe
, default in fhir
docker image: /fhir
127.0.0.1
10000
/run/secrets/app_client_certificate.pem
app
, 172.28.1.3
443
my-external.fqdn:443
30
seconds30
seconds60
seconds60
seconds/
character at start, no /
character at end, use ''
(empty string) to configure root as context path./fhir
SSLCACertificateFile
/run/secrets/ssl_ca_certificate_file.pem
Acceptable client certificate CA names
send to the client, during TLS handshake, sets the apache httpd parameter SSLCADNRequestFile
; if omitted all entries from SSL_CA_CERTIFICATE_FILE
are used/run/secrets/ssl_ca_dn_request_file.pem
SSLCertificateChainFile
; can be omitted if either no chain is needed (aka self signed server certificate) or the file specified via SSL_CERTIFICATE_FILE
contains the certificate chain/run/secrets/ssl_certificate_chain_file.pem
SSLCertificateFile
/run/secrets/ssl_certificate_file.pem
SSLCertificateKeyFile
/run/secrets/ssl_certificate_key_file.pem
SSLVerifyClient
optional
when using OIDC authenticationrequire
app
, 172.28.1.3
/run/secrets/app_client_certificate.pem
/run/secrets/app_client_certificate_private_key.pem
/run/secrets/app_client_certificate_private_key.pem.password
2000
10000
/run/secrets/app_server_trust_certificates.pem
true
false
true
false
2
/run/secrets/db_liquibase.password
liquibase_user
jdbc:postgresql://db/fhir
fhir_users
/run/secrets/db_user.password
fhir_permanent_delete_users
/run/secrets/db_user_permanent_delete.password
fhir_server_permanent_delete_user
fhir_server_user
https://foo.bar/fhir
conf/bundle.xml
hospital.com
certtool --fingerprint --hash=sha512 --infile=client_certificate.pem
20
""
or a single block scalar |
character if no roles should be configuredfalse
false
for developmenttrue
foo.bar, test.com:8080
http://proxy.foo:8080
0.0.0.0
127.0.0.1
8080
X-ClientCert
true
to enable OIDC authorization code flowfalse
true
to enable OIDC back-channel logouttrue
(enabled), DEV_DSF_SERVER_AUTH_OIDC_CLIENT_ID and DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT_PATH to be specifiedfalse
/back-channel-logout
true
to enable OIDC bearer token authenticationfalse
/run/secrets/oidc_provider_client_certificate.pem
/run/secrets/oidc_provider_client_certificate_private_key.pem
/run/secrets/oidc_provider_client_certificate_private_key.pem.password
5000
30000
/run/secrets/oidc_provider_trust_certificates.pem
https://keycloak.test.com:8443/realms/example-realm-name
/run/secrets/app_client_trust_certificates.pem
bpe
docker image: /bpe
, default in fhir
docker image: /fhir
127.0.0.1
10000
/run/secrets/app_client_certificate.pem
app
, 172.28.1.3
443
my-external.fqdn:443
30
seconds30
seconds60
seconds60
seconds/
character at start, no /
character at end, use ''
(empty string) to configure root as context path./bpe
SSLCACertificateFile
/run/secrets/ssl_ca_certificate_file.pem
Acceptable client certificate CA names
send to the client, during TLS handshake, sets the apache httpd parameter SSLCADNRequestFile
; if omitted all entries from SSL_CA_CERTIFICATE_FILE
are used/run/secrets/ssl_ca_dn_request_file.pem
SSLCertificateChainFile
; can be omitted if either no chain is needed (aka self signed server certificate) or the file specified via SSL_CERTIFICATE_FILE
contains the certificate chain/run/secrets/ssl_certificate_chain_file.pem
SSLCertificateFile
/run/secrets/ssl_certificate_file.pem
SSLCertificateKeyFile
/run/secrets/ssl_certificate_key_file.pem
SSLVerifyClient
optional
when using OIDC authenticationrequire
app
, 172.28.1.3
/run/secrets/app_client_certificate.pem
/run/secrets/app_client_certificate_private_key.pem
/run/secrets/app_client_certificate_private_key.pem.password
2000
10000
/run/secrets/app_client _trust_certificates.pem
true
false
true
false
2
/run/secrets/db_liquibase.password
liquibase_user
jdbc:postgresql://db/fhir
fhir_users
/run/secrets/db_user.password
fhir_permanent_delete_users
/run/secrets/db_user_permanent_delete.password
fhir_server_permanent_delete_user
fhir_server_user
https://foo.bar/fhir
conf/bundle.xml
hospital.com
certtool --fingerprint --hash=sha512 --infile=client_certificate.pem
20
false
false
for developmenttrue
foo.bar, test.com:8080
http://proxy.foo:8080
0.0.0.0
127.0.0.1
8080
X-ClientCert
true
to enable OIDC authorization code flowfalse
true
to enable OIDC back-channel logouttrue
(enabled), DEV_DSF_SERVER_AUTH_OIDC_CLIENT_ID and DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT_PATH to be specifiedfalse
/back-channel-logout
true
to enable OIDC bearer token authenticationfalse
/run/secrets/oidc_provider_client_certificate.pem
/run/secrets/oidc_provider_client_certificate_private_key.pem
/run/secrets/oidc_provider_client_certificate_private_key.pem.password
5000
30000
/run/secrets/oidc_provider_trust_certificates.pem
https://keycloak.test.com:8443/realms/example-realm-name
/run/secrets/app_client_trust_certificates.pem
bpe
docker image: /bpe
, default in fhir
docker image: /fhir
127.0.0.1
10000
/run/secrets/app_client_certificate.pem
app
, 172.28.1.3
443
my-external.fqdn:443
30
seconds30
seconds60
seconds60
seconds/
character at start, no /
character at end, use ''
(empty string) to configure root as context path./fhir
SSLCACertificateFile
/run/secrets/ssl_ca_certificate_file.pem
Acceptable client certificate CA names
send to the client, during TLS handshake, sets the apache httpd parameter SSLCADNRequestFile
; if omitted all entries from SSL_CA_CERTIFICATE_FILE
are used/run/secrets/ssl_ca_dn_request_file.pem
SSLCertificateChainFile
; can be omitted if either no chain is needed (aka self signed server certificate) or the file specified via SSL_CERTIFICATE_FILE
contains the certificate chain/run/secrets/ssl_certificate_chain_file.pem
SSLCertificateFile
/run/secrets/ssl_certificate_file.pem
SSLCertificateKeyFile
/run/secrets/ssl_certificate_key_file.pem
SSLVerifyClient
optional
when using OIDC authenticationrequire
app
, 172.28.1.3
true
false
2
/run/secrets/db_liquibase.password
liquibase_user
jdbc:postgresql://db/bpe
camunda_users
/run/secrets/db_user_camunda.password
camunda_server_user
bpe_users
/run/secrets/db_user.password
bpe_server_user
true
false
true
false
true
false
true
false
true
false
true
false
true
false
/run/secrets/app_client_certificate.pem
/run/secrets/app_client_certificate_private_key.pem
/run/secrets/app_client_certificate_private_key.pem.password
2000
60000
true
false
5000
60000
true
false
/run/secrets/app_client_trust_certificates.pem
ca/server_cert_root_cas.pem
?criteria=QuestionnaireResponse%3Fstatus%3Dcompleted&status=active&type=websocket&payload=application/fhir%2Bjson
https://foo.bar/fhir
-1
means infinite number of retries-1
5000
?criteria=Task%3Fstatus%3Drequested&status=active&type=websocket&payload=application/fhir%2Bjson
/run/secrets/smtp_server_client_certificate.pem
/run/secrets/smtp_server_client_certificate_private_key.pem
/run/secrets/smtp_server_client_certificate_private_key.pem.password
sender@localhost
smtp.server.de
4
/opt/bpe/log/bpe.log
465
0
reply.to@localhost
true
; requires SMTP server to be configuredfalse
true
; requires SMTP server to be configuredfalse
/run/secrets/smime_certificate.p12
/run/secrets/smime_certificate.p12.password
recipient@localhost
cc.recipient@localhost
/run/secrets/smtp_server_trust_certificates.pem
ca/server_cert_root_cas.pem
true
false
4
10
40
dsfdev_updateAllowList|1.0, another_process|x.y
-1
means infinite number of retries-1
5000
process
old_process|x.y
<= 0
means number of cpu cores-1
https://foo.bar/bpe
https://localhost/bpe
false
false
for developmenttrue
dev
, test
and prod
environments im configured; supported values: dev
, test
and prod
foo.bar, test.com:8080
http://proxy.foo:8080
0.0.0.0
127.0.0.1
8080
X-ClientCert
true
to enable OIDC authorization code flowfalse
true
to enable OIDC back-channel logouttrue
(enabled), DEV_DSF_SERVER_AUTH_OIDC_CLIENT_ID and DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT_PATH to be specifiedfalse
/back-channel-logout
true
to enable OIDC bearer token authenticationfalse
/run/secrets/oidc_provider_client_certificate.pem
/run/secrets/oidc_provider_client_certificate_private_key.pem
/run/secrets/oidc_provider_client_certificate_private_key.pem.password
5000
30000
/run/secrets/oidc_provider_trust_certificates.pem
ca/server_cert_root_cas.pem
https://keycloak.test.com:8443/realms/example-realm-name
/run/secrets/app_client_trust_certificates.pem
ca/client_cert_ca_chains.pem
bpe
docker image: /bpe
, default in fhir
docker image: /fhir
127.0.0.1
10000
true
false
0.0.0.0
127.0.0.1
8080
X-ClientCert
true
to enable OIDC authorization code flowfalse
true
to enable OIDC back-channel logouttrue
(enabled), DEV_DSF_SERVER_AUTH_OIDC_CLIENT_ID and DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT_PATH to be specifiedfalse
/back-channel-logout
true
to enable OIDC bearer token authenticationfalse
/run/secrets/oidc_provider_client_certificate.pem
/run/secrets/oidc_provider_client_certificate_private_key.pem
/run/secrets/oidc_provider_client_certificate_private_key.pem.password
5000
30000
/run/secrets/oidc_provider_trust_certificates.pem
https://keycloak.test.com:8443/realms/example-realm-name
/run/secrets/app_client_trust_certificates.pem
bpe
docker image: /bpe
, default in fhir
docker image: /fhir
127.0.0.1
10000
true
false
2
/run/secrets/db_liquibase.password
liquibase_user
jdbc:postgresql://db/bpe
camunda_users
/run/secrets/db_user_camunda.password
camunda_server_user
bpe_users
/run/secrets/db_user.password
bpe_server_user
true
.false
true
.false
true
.false
true
.false
/run/secrets/app_client_certificate.pem
/run/secrets/app_client_certificate_private_key.pem
/run/secrets/app_client_certificate_private_key.pem.password
2000
60000
true
false
5000
60000
true
false
/run/secrets/app_server_trust_certificates.pem
?criteria=QuestionnaireResponse%3Fstatus%3Dcompleted&status=active&type=websocket&payload=application/fhir%2Bjson
https://foo.bar/fhir
-1
means infinite number of retries-1
5000
?criteria=Task%3Fstatus%3Drequested&status=active&type=websocket&payload=application/fhir%2Bjson
/run/secrets/smtp_server_client_certificate.pem
/run/secrets/smtp_server_client_certificate_private_key.pem
/run/secrets/smtp_server_client_certificate_private_key.pem.password
sender@localhost
smtp.server.de
4
/opt/bpe/log/bpe.log
465
0
reply.to@localhost
true
. Requires SMTP server to be configured.false
true
. Requires SMTP server to be configured.false
/run/secrets/smime_certificate.p12
/run/secrets/smime_certificate.p12.password
recipient@localhost
cc.recipient@localhost
/run/secrets/smtp_server_trust_certificates.pem
true
false
4
10
40
dsfdev_updateAllowList|1.0, another_process|x.y
-1
means infinite number of retries-1
5000
process
old_process|x.y
<= 0
means number of cpu cores-1
foo.bar, test.com:8080
http://proxy.foo:8080
0.0.0.0
127.0.0.1
app
, 172.28.1.3
443
my-external.fqdn:443
30
seconds30
seconds60
seconds60
seconds/
character at start, no /
character at end, use ''
(empty string) to configure root as context path/bpe
SSLCACertificateFile
ca/client_cert_ca_chains.pem
Acceptable client certificate CA names
send to the client, during TLS handshake, sets the apache httpd parameter SSLCADNRequestFile
; if omitted all entries from SSL_CA_CERTIFICATE_FILE
are usedca/client_cert_issuing_cas.pem
SSLCertificateChainFile
; can be omitted if either no chain is needed (self signed server certificate) or the file specified via SSL_CERTIFICATE_FILE
contains the certificate chain/run/secrets/ssl_certificate_chain_file.pem
SSLCertificateFile
, may contain all certificates between the server certificate and the root ca certificate (excluding the root ca certificate). Omit SSL_CERTIFICATE_CHAIN_FILE
if chain included/run/secrets/ssl_certificate_file.pem
SSLCertificateKeyFile
/run/secrets/ssl_certificate_key_file.pem
C
values, must be a comma-separated list of strings in single quotation marks, e.g. 'DE', 'FR'
. If a client certificate with a not configured subject country C
value is used, the server answers with a 403 Forbidden
status code'DE'
CN
values, must be a comma-separated list of strings in single quotation marks. If a client certificate from a not configured issuing ca common-name is used, the server answers with a 403 Forbidden
status code'GEANT TLS ECC 1', 'HARICA OV TLS ECC', 'GEANT TLS RSA 1', 'HARICA OV TLS RSA', 'GEANT S/MIME ECC 1', 'HARICA S/MIME ECC', 'GEANT S/MIME RSA 1', 'HARICA S/MIME RSA', 'DFN-Verein Global Issuing CA', 'Fraunhofer User CA - G02', 'D-TRUST SSL Class 3 CA 1 2009', 'Sectigo RSA Organization Validation Secure Server CA', 'GEANT OV RSA CA 4', 'GEANT Personal CA 4', 'GEANT eScience Personal CA 4', 'Sectigo ECC Organization Validation Secure Server CA', 'GEANT OV ECC CA 4', 'GEANT Personal ECC CA 4', 'GEANT eScience Personal ECC CA 4', 'D-TRUST Limited Basic CA 1-2 2019', 'D-TRUST Limited Basic CA 1-3 2019'
SSLVerifyClient
optional
when using OIDC authenticationrequire
app
, 172.28.1.3
/run/secrets/app_client_certificate.pem
/run/secrets/app_client_certificate_private_key.pem
/run/secrets/app_client_certificate_private_key.pem.password
2000
10000
/run/secrets/app_client_trust_certificates.pem
ca/server_cert_root_cas.pem
true
false
true
false
2
/run/secrets/db_liquibase.password
liquibase_user
jdbc:postgresql://db/fhir
fhir_users
/run/secrets/db_user.password
fhir_permanent_delete_users
/run/secrets/db_user_permanent_delete.password
fhir_server_permanent_delete_user
fhir_server_user
true
false
true
false
true
false
https://foo.bar/fhir
conf/bundle.xml
hospital.com
certtool --fingerprint --hash=sha512 --infile=client_certificate.pem
20
false
false
for developmenttrue
dev
, test
and prod
environments im configured; supported values: dev
, test
and prod
foo.bar, test.com:8080
http://proxy.foo:8080
0.0.0.0
127.0.0.1
8080
X-ClientCert
true
to enable OIDC authorization code flowfalse
true
to enable OIDC back-channel logouttrue
(enabled), DEV_DSF_SERVER_AUTH_OIDC_CLIENT_ID and DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT_PATH to be specifiedfalse
/back-channel-logout
true
to enable OIDC bearer token authenticationfalse
/run/secrets/oidc_provider_client_certificate.pem
/run/secrets/oidc_provider_client_certificate_private_key.pem
/run/secrets/oidc_provider_client_certificate_private_key.pem.password
5000
30000
/run/secrets/oidc_provider_trust_certificates.pem
ca/server_cert_root_cas.pem
https://keycloak.test.com:8443/realms/example-realm-name
/run/secrets/app_client_trust_certificates.pem
ca/client_cert_ca_chains.pem
bpe
docker image: /bpe
, default in fhir
docker image: /fhir
127.0.0.1
10000
/run/secrets/app_client_certificate.pem
/run/secrets/app_client_certificate.pem
/run/secrets/app_client_certificate_private_key.pem
/run/secrets/app_client_certificate_private_key.pem.password
2000
10000
/run/secrets/app_client _trust_certificates.pem
true
false
true
false
2
/run/secrets/db_liquibase.password
liquibase_user
jdbc:postgresql://db/fhir
fhir_users
/run/secrets/db_user.password
fhir_permanent_delete_users
/run/secrets/db_user_permanent_delete.password
fhir_server_permanent_delete_user
fhir_server_user
true
.false
true
.false
true
.false
https://foo.bar/fhir
conf/bundle.xml
hospital.com
certtool --fingerprint --hash=sha512 --infile=client_certificate.pem
20
false
false
for developmenttrue
dev
, test
and prod
environments im configured; supported values: dev
, test
and prod
foo.bar, test.com:8080
http://proxy.foo:8080
0.0.0.0
127.0.0.1
8080
X-ClientCert
true
to enable OIDC authorization code flowfalse
true
to enable OIDC back-channel logouttrue
(enabled), DEV_DSF_SERVER_AUTH_OIDC_CLIENT_ID and DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT_PATH to be specifiedfalse
/back-channel-logout
true
to enable OIDC bearer token authenticationfalse
/run/secrets/oidc_provider_client_certificate.pem
/run/secrets/oidc_provider_client_certificate_private_key.pem
/run/secrets/oidc_provider_client_certificate_private_key.pem.password
5000
30000
/run/secrets/oidc_provider_trust_certificates.pem
https://keycloak.test.com:8443/realms/example-realm-name
/run/secrets/app_client_trust_certificates.pem
bpe
docker image: /bpe
, default in fhir
docker image: /fhir
127.0.0.1
10000
/run/secrets/app_client_certificate.pem
0.0.0.0
127.0.0.1
8080
X-ClientCert
true
to enable OIDC authorization code flowfalse
true
to enable OIDC back-channel logouttrue
(enabled), DEV_DSF_SERVER_AUTH_OIDC_CLIENT_ID and DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT_PATH to be specifiedfalse
/back-channel-logout
true
to enable OIDC bearer token authenticationfalse
/run/secrets/oidc_provider_client_certificate.pem
/run/secrets/oidc_provider_client_certificate_private_key.pem
/run/secrets/oidc_provider_client_certificate_private_key.pem.password
5000
30000
/run/secrets/oidc_provider_trust_certificates.pem
https://keycloak.test.com:8443/realms/example-realm-name
/run/secrets/app_client_trust_certificates.pem
bpe
docker image: /bpe
, default in fhir
docker image: /fhir
127.0.0.1
10000
true
false
2
/run/secrets/db_liquibase.password
liquibase_user
jdbc:postgresql://db/bpe
camunda_users
/run/secrets/db_user_camunda.password
camunda_server_user
bpe_users
/run/secrets/db_user.password
bpe_server_user
true
.false
true
.false
true
.false
true
.false
true
.false
true
.false
true
.false
/run/secrets/app_client_certificate.pem
/run/secrets/app_client_certificate_private_key.pem
/run/secrets/app_client_certificate_private_key.pem.password
2000
60000
true
false
5000
60000
true
false
/run/secrets/app_client_trust_certificates.pem
?criteria=QuestionnaireResponse%3Fstatus%3Dcompleted&status=active&type=websocket&payload=application/fhir%2Bjson
https://foo.bar/fhir
-1
means infinite number of retries-1
5000
?criteria=Task%3Fstatus%3Drequested&status=active&type=websocket&payload=application/fhir%2Bjson
/run/secrets/smtp_server_client_certificate.pem
/run/secrets/smtp_server_client_certificate_private_key.pem
/run/secrets/smtp_server_client_certificate_private_key.pem.password
sender@localhost
smtp.server.de
4
/opt/bpe/log/bpe.log
465
0
reply.to@localhost
true
. Requires SMTP server to be configured.false
true
. Requires SMTP server to be configured.false
/run/secrets/smime_certificate.p12
/run/secrets/smime_certificate.p12.password
recipient@localhost
cc.recipient@localhost
/run/secrets/smtp_server_trust_certificates.pem
true
false
4
10
40
dsfdev_updateAllowList|1.0, another_process|x.y
-1
means infinite number of retries-1
5000
process
old_process|x.y
<= 0
means number of cpu cores-1
https://foo.bar/bpe
https://localhost/bpe
false
false
for developmenttrue
dev
, test
and prod
environments im configured; supported values: dev
, test
and prod
foo.bar, test.com:8080
http://proxy.foo:8080
0.0.0.0
127.0.0.1
app
, 172.28.1.3
443
my-external.fqdn:443
30
seconds30
seconds60
seconds60
seconds/
character at start, no /
character at end, use ''
(empty string) to configure root as context path./fhir
SSLCACertificateFile
/run/secrets/ssl_ca_certificate_file.pem
Acceptable client certificate CA names
send to the client, during TLS handshake, sets the apache httpd parameter SSLCADNRequestFile
; if omitted all entries from SSL_CA_CERTIFICATE_FILE
are used/run/secrets/ssl_ca_dn_request_file.pem
SSLCertificateChainFile
; can be omitted if either no chain is needed (aka self signed server certificate) or the file specified via SSL_CERTIFICATE_FILE
contains the certificate chain/run/secrets/ssl_certificate_chain_file.pem
SSLCertificateFile
/run/secrets/ssl_certificate_file.pem
SSLCertificateKeyFile
/run/secrets/ssl_certificate_key_file.pem
SSLVerifyClient
optional
when using OIDC authenticationrequire
app
, 172.28.1.3
/run/secrets/app_client_certificate.pem
/run/secrets/app_client_certificate_private_key.pem
/run/secrets/app_client_certificate_private_key.pem.password
2000
10000
/run/secrets/app_client _trust_certificates.pem
true
false
true
false
2
/run/secrets/db_liquibase.password
liquibase_user
jdbc:postgresql://db/fhir
fhir_users
/run/secrets/db_user.password
fhir_permanent_delete_users
/run/secrets/db_user_permanent_delete.password
fhir_server_permanent_delete_user
fhir_server_user
true
.false
true
.false
true
.false
https://foo.bar/fhir
conf/bundle.xml
hospital.com
certtool --fingerprint --hash=sha512 --infile=client_certificate.pem
20
false
false
for developmenttrue
dev
, test
and prod
environments im configured; supported values: dev
, test
and prod
foo.bar, test.com:8080
http://proxy.foo:8080
0.0.0.0
127.0.0.1
8080
X-ClientCert
true
to enable OIDC authorization code flowfalse
true
to enable OIDC back-channel logouttrue
(enabled), DEV_DSF_SERVER_AUTH_OIDC_CLIENT_ID and DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT_PATH to be specifiedfalse
/back-channel-logout
true
to enable OIDC bearer token authenticationfalse
/run/secrets/oidc_provider_client_certificate.pem
/run/secrets/oidc_provider_client_certificate_private_key.pem
/run/secrets/oidc_provider_client_certificate_private_key.pem.password
5000
30000
/run/secrets/oidc_provider_trust_certificates.pem
https://keycloak.test.com:8443/realms/example-realm-name
/run/secrets/app_client_trust_certificates.pem
bpe
docker image: /bpe
, default in fhir
docker image: /fhir
127.0.0.1
10000
/run/secrets/app_client_certificate.pem
0.0.0.0
127.0.0.1
8080
X-ClientCert
true
to enable OIDC authorization code flowfalse
true
to enable OIDC back-channel logouttrue
(enabled), DEV_DSF_SERVER_AUTH_OIDC_CLIENT_ID and DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT_PATH to be specifiedfalse
/back-channel-logout
true
to enable OIDC bearer token authenticationfalse
/run/secrets/oidc_provider_client_certificate.pem
/run/secrets/oidc_provider_client_certificate_private_key.pem
/run/secrets/oidc_provider_client_certificate_private_key.pem.password
5000
30000
/run/secrets/oidc_provider_trust_certificates.pem
https://keycloak.test.com:8443/realms/example-realm-name
/run/secrets/app_client_trust_certificates.pem
bpe
docker image: /bpe
, default in fhir
docker image: /fhir
127.0.0.1
10000
true
false
2
/run/secrets/db_liquibase.password
liquibase_user
jdbc:postgresql://db/bpe
camunda_users
/run/secrets/db_user_camunda.password
camunda_server_user
bpe_users
/run/secrets/db_user.password
bpe_server_user
true
.false
true
.false
true
.false
true
.false
true
.false
true
.false
true
.false
/run/secrets/app_client_certificate.pem
/run/secrets/app_client_certificate_private_key.pem
/run/secrets/app_client_certificate_private_key.pem.password
2000
60000
true
false
5000
60000
true
false
/run/secrets/app_client_trust_certificates.pem
?criteria=QuestionnaireResponse%3Fstatus%3Dcompleted&status=active&type=websocket&payload=application/fhir%2Bjson
https://foo.bar/fhir
-1
means infinite number of retries-1
5000
?criteria=Task%3Fstatus%3Drequested&status=active&type=websocket&payload=application/fhir%2Bjson
/run/secrets/smtp_server_client_certificate.pem
/run/secrets/smtp_server_client_certificate_private_key.pem
/run/secrets/smtp_server_client_certificate_private_key.pem.password
sender@localhost
smtp.server.de
4
/opt/bpe/log/bpe.log
465
0
reply.to@localhost
true
. Requires SMTP server to be configured.false
true
. Requires SMTP server to be configured.false
/run/secrets/smime_certificate.p12
/run/secrets/smime_certificate.p12.password
recipient@localhost
cc.recipient@localhost
/run/secrets/smtp_server_trust_certificates.pem
true
false
4
10
40
dsfdev_updateAllowList|1.0, another_process|x.y
-1
means infinite number of retries-1
5000
process
old_process|x.y
<= 0
means number of cpu cores-1
https://foo.bar/bpe
https://localhost/bpe
false
false
for developmenttrue
dev
, test
and prod
environments im configured; supported values: dev
, test
and prod
foo.bar, test.com:8080
http://proxy.foo:8080
0.0.0.0
127.0.0.1
app
, 172.28.1.3
443
my-external.fqdn:443
30
seconds30
seconds60
seconds60
seconds/
character at start, no /
character at end, use ''
(empty string) to configure root as context path/bpe
SSLCACertificateFile
ca/client_cert_ca_chains.pem
Acceptable client certificate CA names
send to the client, during TLS handshake, sets the apache httpd parameter SSLCADNRequestFile
; if omitted all entries from SSL_CA_CERTIFICATE_FILE
are usedca/client_cert_issuing_cas.pem
SSLCertificateChainFile
; can be omitted if either no chain is needed (self signed server certificate) or the file specified via SSL_CERTIFICATE_FILE
contains the certificate chain/run/secrets/ssl_certificate_chain_file.pem
SSLCertificateFile
, may contain all certificates between the server certificate and the root ca certificate (excluding the root ca certificate). Omit SSL_CERTIFICATE_CHAIN_FILE
if chain included/run/secrets/ssl_certificate_file.pem
SSLCertificateKeyFile
/run/secrets/ssl_certificate_key_file.pem
C
values, must be a comma-separated list of strings in single quotation marks, e.g. 'DE', 'FR'
. If a client certificate with a not configured subject country C
value is used, the server answers with a 403 Forbidden
status code'DE'
CN
values, must be a comma-separated list of strings in single quotation marks. If a client certificate from a not configured issuing ca common-name is used, the server answers with a 403 Forbidden
status code'GEANT TLS ECC 1', 'HARICA OV TLS ECC', 'GEANT TLS RSA 1', 'HARICA OV TLS RSA', 'GEANT S/MIME ECC 1', 'HARICA S/MIME ECC', 'GEANT S/MIME RSA 1', 'HARICA S/MIME RSA', 'DFN-Verein Global Issuing CA', 'Fraunhofer User CA - G02', 'D-TRUST SSL Class 3 CA 1 2009', 'Sectigo RSA Organization Validation Secure Server CA', 'GEANT OV RSA CA 4', 'GEANT Personal CA 4', 'GEANT eScience Personal CA 4', 'Sectigo ECC Organization Validation Secure Server CA', 'GEANT OV ECC CA 4', 'GEANT Personal ECC CA 4', 'GEANT eScience Personal ECC CA 4', 'D-TRUST Limited Basic CA 1-2 2019', 'D-TRUST Limited Basic CA 1-3 2019'
SSLVerifyClient
optional
when using OIDC authenticationrequire
app
, 172.28.1.3
/run/secrets/app_client_certificate.pem
/run/secrets/app_client_certificate_private_key.pem
/run/secrets/app_client_certificate_private_key.pem.password
2000
10000
/run/secrets/app_client _trust_certificates.pem
true
false
true
false
2
/run/secrets/db_liquibase.password
liquibase_user
jdbc:postgresql://db/fhir
fhir_users
/run/secrets/db_user.password
fhir_permanent_delete_users
/run/secrets/db_user_permanent_delete.password
fhir_server_permanent_delete_user
fhir_server_user
true
.false
true
.false
true
.false
https://foo.bar/fhir
conf/bundle.xml
hospital.com
certtool --fingerprint --hash=sha512 --infile=client_certificate.pem
20
false
false
for developmenttrue
dev
, test
and prod
environments im configured; supported values: dev
, test
and prod
foo.bar, test.com:8080
http://proxy.foo:8080
0.0.0.0
127.0.0.1
8080
X-ClientCert
true
to enable OIDC authorization code flowfalse
true
to enable OIDC back-channel logouttrue
(enabled), DEV_DSF_SERVER_AUTH_OIDC_CLIENT_ID and DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT_PATH to be specifiedfalse
/back-channel-logout
true
to enable OIDC bearer token authenticationfalse
/run/secrets/oidc_provider_client_certificate.pem
/run/secrets/oidc_provider_client_certificate_private_key.pem
/run/secrets/oidc_provider_client_certificate_private_key.pem.password
5000
30000
/run/secrets/oidc_provider_trust_certificates.pem
https://keycloak.test.com:8443/realms/example-realm-name
/run/secrets/app_client_trust_certificates.pem
bpe
docker image: /bpe
, default in fhir
docker image: /fhir
127.0.0.1
10000
/run/secrets/app_client_certificate.pem
app
, 172.28.1.3
443
my-external.fqdn:443
30
seconds30
seconds60
seconds60
seconds/
character at start, no /
character at end, use ''
(empty string) to configure root as context path./fhir
SSLCACertificateFile
/run/secrets/ssl_ca_certificate_file.pem
Acceptable client certificate CA names
send to the client, during TLS handshake, sets the apache httpd parameter SSLCADNRequestFile
; if omitted all entries from SSL_CA_CERTIFICATE_FILE
are used/run/secrets/ssl_ca_dn_request_file.pem
SSLCertificateChainFile
; can be omitted if either no chain is needed (aka self signed server certificate) or the file specified via SSL_CERTIFICATE_FILE
contains the certificate chain/run/secrets/ssl_certificate_chain_file.pem
SSLCertificateFile
/run/secrets/ssl_certificate_file.pem
SSLCertificateKeyFile
/run/secrets/ssl_certificate_key_file.pem
SSLVerifyClient
optional
when using OIDC authenticationrequire
app
, 172.28.1.3
app
, 172.28.1.3
443
my-external.fqdn:443
30
seconds30
seconds60
seconds60
seconds/
character at start, no /
character at end, use ''
(empty string) to configure root as context path./fhir
SSLCACertificateFile
/run/secrets/ssl_ca_certificate_file.pem
Acceptable client certificate CA names
send to the client, during TLS handshake, sets the apache httpd parameter SSLCADNRequestFile
; if omitted all entries from SSL_CA_CERTIFICATE_FILE
are used/run/secrets/ssl_ca_dn_request_file.pem
SSLCertificateChainFile
; can be omitted if either no chain is needed (aka self signed server certificate) or the file specified via SSL_CERTIFICATE_FILE
contains the certificate chain/run/secrets/ssl_certificate_chain_file.pem
SSLCertificateFile
/run/secrets/ssl_certificate_file.pem
SSLCertificateKeyFile
/run/secrets/ssl_certificate_key_file.pem
SSLVerifyClient
optional
when using OIDC authenticationrequire
app
, 172.28.1.3
0.0.0.0
127.0.0.1
8080
X-ClientCert
true
to enable OIDC authorization code flowfalse
true
to enable OIDC back-channel logouttrue
(enabled), DEV_DSF_SERVER_AUTH_OIDC_CLIENT_ID and DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT_PATH to be specifiedfalse
/back-channel-logout
true
to enable OIDC bearer token authenticationfalse
/run/secrets/oidc_provider_client_certificate.pem
/run/secrets/oidc_provider_client_certificate_private_key.pem
/run/secrets/oidc_provider_client_certificate_private_key.pem.password
5000
30000
/run/secrets/oidc_provider_trust_certificates.pem
https://keycloak.test.com:8443/realms/example-realm-name
/run/secrets/app_client_trust_certificates.pem
bpe
docker image: /bpe
, default in fhir
docker image: /fhir
127.0.0.1
10000
true
false
2
/run/secrets/db_liquibase.password
liquibase_user
jdbc:postgresql://db/bpe
camunda_users
/run/secrets/db_user_camunda.password
camunda_server_user
bpe_users
/run/secrets/db_user.password
bpe_server_user
true
.false
true
.false
true
.false
true
.false
true
.false
true
.false
true
.false
/run/secrets/app_client_certificate.pem
/run/secrets/app_client_certificate_private_key.pem
/run/secrets/app_client_certificate_private_key.pem.password
2000
60000
true
false
5000
60000
true
false
/run/secrets/app_client_trust_certificates.pem
?criteria=QuestionnaireResponse%3Fstatus%3Dcompleted&status=active&type=websocket&payload=application/fhir%2Bjson
https://foo.bar/fhir
-1
means infinite number of retries-1
5000
?criteria=Task%3Fstatus%3Drequested&status=active&type=websocket&payload=application/fhir%2Bjson
/run/secrets/smtp_server_client_certificate.pem
/run/secrets/smtp_server_client_certificate_private_key.pem
/run/secrets/smtp_server_client_certificate_private_key.pem.password
sender@localhost
smtp.server.de
4
/opt/bpe/log/bpe.log
465
0
reply.to@localhost
true
. Requires SMTP server to be configured.false
true
. Requires SMTP server to be configured.false
/run/secrets/smime_certificate.p12
/run/secrets/smime_certificate.p12.password
recipient@localhost
cc.recipient@localhost
/run/secrets/smtp_server_trust_certificates.pem
true
false
4
10
40
dsfdev_updateAllowList|1.0, another_process|x.y
-1
means infinite number of retries-1
5000
process
old_process|x.y
<= 0
means number of cpu cores-1
https://foo.bar/bpe
https://localhost/bpe
false
false
for developmenttrue
dev
, test
and prod
environments im configured; supported values: dev
, test
and prod
foo.bar, test.com:8080
http://proxy.foo:8080
0.0.0.0
127.0.0.1
app
, 172.28.1.3
443
my-external.fqdn:443
30
seconds30
seconds60
seconds60
seconds/
character at start, no /
character at end, use ''
(empty string) to configure root as context path./fhir
SSLCACertificateFile
/run/secrets/ssl_ca_certificate_file.pem
Acceptable client certificate CA names
send to the client, during TLS handshake, sets the apache httpd parameter SSLCADNRequestFile
; if omitted all entries from SSL_CA_CERTIFICATE_FILE
are used/run/secrets/ssl_ca_dn_request_file.pem
SSLCertificateChainFile
; can be omitted if either no chain is needed (aka self signed server certificate) or the file specified via SSL_CERTIFICATE_FILE
contains the certificate chain/run/secrets/ssl_certificate_chain_file.pem
SSLCertificateFile
/run/secrets/ssl_certificate_file.pem
SSLCertificateKeyFile
/run/secrets/ssl_certificate_key_file.pem
SSLVerifyClient
optional
when using OIDC authenticationrequire
app
, 172.28.1.3
0.0.0.0
127.0.0.1
8080
X-ClientCert
true
to enable OIDC authorization code flowfalse
true
to enable OIDC back-channel logouttrue
(enabled), DEV_DSF_SERVER_AUTH_OIDC_CLIENT_ID and DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT_PATH to be specifiedfalse
/back-channel-logout
true
to enable OIDC bearer token authenticationfalse
/run/secrets/oidc_provider_client_certificate.pem
/run/secrets/oidc_provider_client_certificate_private_key.pem
/run/secrets/oidc_provider_client_certificate_private_key.pem.password
5000
30000
/run/secrets/oidc_provider_trust_certificates.pem
https://keycloak.test.com:8443/realms/example-realm-name
/run/secrets/app_client_trust_certificates.pem
bpe
docker image: /bpe
, default in fhir
docker image: /fhir
127.0.0.1
10000
true
false
2
/run/secrets/db_liquibase.password
liquibase_user
jdbc:postgresql://db/bpe
camunda_users
/run/secrets/db_user_camunda.password
camunda_server_user
bpe_users
/run/secrets/db_user.password
bpe_server_user
true
.false
true
.false
true
.false
true
.false
/run/secrets/app_client_certificate.pem
/run/secrets/app_client_certificate_private_key.pem
/run/secrets/app_client_certificate_private_key.pem.password
2000
60000
true
false
5000
60000
true
false
/run/secrets/app_client_trust_certificates.pem
?criteria=QuestionnaireResponse%3Fstatus%3Dcompleted&status=active&type=websocket&payload=application/fhir%2Bjson
https://foo.bar/fhir
-1
means infinite number of retries-1
5000
?criteria=Task%3Fstatus%3Drequested&status=active&type=websocket&payload=application/fhir%2Bjson
/run/secrets/smtp_server_client_certificate.pem
/run/secrets/smtp_server_client_certificate_private_key.pem
/run/secrets/smtp_server_client_certificate_private_key.pem.password
sender@localhost
smtp.server.de
4
/opt/bpe/log/bpe.log
465
0
reply.to@localhost
true
. Requires SMTP server to be configured.false
true
. Requires SMTP server to be configured.false
/run/secrets/smime_certificate.p12
/run/secrets/smime_certificate.p12.password
recipient@localhost
cc.recipient@localhost
/run/secrets/smtp_server_trust_certificates.pem
true
false
4
10
40
dsfdev_updateAllowList|1.0, another_process|x.y
-1
means infinite number of retries-1
5000
process
old_process|x.y
<= 0
means number of cpu cores-1
foo.bar, test.com:8080
http://proxy.foo:8080
0.0.0.0
127.0.0.1
/run/secrets/app_client_certificate.pem
/run/secrets/app_client_certificate_private_key.pem
/run/secrets/app_client_certificate_private_key.pem.password
2000
10000
/run/secrets/app_client _trust_certificates.pem
true
false
true
false
2
/run/secrets/db_liquibase.password
liquibase_user
jdbc:postgresql://db/fhir
fhir_users
/run/secrets/db_user.password
fhir_permanent_delete_users
/run/secrets/db_user_permanent_delete.password
fhir_server_permanent_delete_user
fhir_server_user
true
.false
true
.false
true
.false
https://foo.bar/fhir
conf/bundle.xml
hospital.com
certtool --fingerprint --hash=sha512 --infile=client_certificate.pem
20
false
false
for developmenttrue
dev
, test
and prod
environments im configured; supported values: dev
, test
and prod
foo.bar, test.com:8080
http://proxy.foo:8080
0.0.0.0
127.0.0.1
8080
X-ClientCert
true
to enable OIDC authorization code flowfalse
true
to enable OIDC back-channel logouttrue
(enabled), DEV_DSF_SERVER_AUTH_OIDC_CLIENT_ID and DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT_PATH to be specifiedfalse
/back-channel-logout
true
to enable OIDC bearer token authenticationfalse
/run/secrets/oidc_provider_client_certificate.pem
/run/secrets/oidc_provider_client_certificate_private_key.pem
/run/secrets/oidc_provider_client_certificate_private_key.pem.password
5000
30000
/run/secrets/oidc_provider_trust_certificates.pem
https://keycloak.test.com:8443/realms/example-realm-name
/run/secrets/app_client_trust_certificates.pem
bpe
docker image: /bpe
, default in fhir
docker image: /fhir
127.0.0.1
10000
/run/secrets/app_client_certificate.pem
app
, 172.28.1.3
443
my-external.fqdn:443
30
seconds30
seconds60
seconds60
seconds/
character at start, no /
character at end, use ''
(empty string) to configure root as context path/fhir
SSLCACertificateFile
ca/client_cert_ca_chains.pem
Acceptable client certificate CA names
send to the client, during TLS handshake, sets the apache httpd parameter SSLCADNRequestFile
; if omitted all entries from SSL_CA_CERTIFICATE_FILE
are usedca/client_cert_issuing_cas.pem
SSLCertificateChainFile
; can be omitted if either no chain is needed (self signed server certificate) or the file specified via SSL_CERTIFICATE_FILE
contains the certificate chain/run/secrets/ssl_certificate_chain_file.pem
SSLCertificateFile
, may contain all certificates between the server certificate and the root ca certificate (excluding the root ca certificate). Omit SSL_CERTIFICATE_CHAIN_FILE
if chain included/run/secrets/ssl_certificate_file.pem
SSLCertificateKeyFile
/run/secrets/ssl_certificate_key_file.pem
C
values, must be a comma-separated list of strings in single quotation marks, e.g. 'DE', 'FR'
. If a client certificate with a not configured subject country C
value is used, the server answers with a 403 Forbidden
status code'DE'
CN
values, must be a comma-separated list of strings in single quotation marks. If a client certificate from a not configured issuing ca common-name is used, the server answers with a 403 Forbidden
status code'GEANT TLS ECC 1', 'HARICA OV TLS ECC', 'GEANT TLS RSA 1', 'HARICA OV TLS RSA', 'GEANT S/MIME ECC 1', 'HARICA S/MIME ECC', 'GEANT S/MIME RSA 1', 'HARICA S/MIME RSA', 'DFN-Verein Global Issuing CA', 'Fraunhofer User CA - G02', 'D-TRUST SSL Class 3 CA 1 2009', 'Sectigo RSA Organization Validation Secure Server CA', 'GEANT OV RSA CA 4', 'GEANT Personal CA 4', 'GEANT eScience Personal CA 4', 'Sectigo ECC Organization Validation Secure Server CA', 'GEANT OV ECC CA 4', 'GEANT Personal ECC CA 4', 'GEANT eScience Personal ECC CA 4', 'D-TRUST Limited Basic CA 1-2 2019', 'D-TRUST Limited Basic CA 1-3 2019'
SSLVerifyClient
optional
when using OIDC authenticationrequire
app
, 172.28.1.3
/run/secrets/app_client_certificate.pem
/run/secrets/app_client_certificate_private_key.pem
/run/secrets/app_client_certificate_private_key.pem.password
2000
10000
/run/secrets/app_client _trust_certificates.pem
true
false
true
false
2
/run/secrets/db_liquibase.password
liquibase_user
jdbc:postgresql://db/fhir
fhir_users
/run/secrets/db_user.password
fhir_permanent_delete_users
/run/secrets/db_user_permanent_delete.password
fhir_server_permanent_delete_user
fhir_server_user
https://foo.bar/fhir
conf/bundle.xml
hospital.com
certtool --fingerprint --hash=sha512 --infile=client_certificate.pem
20
false
false
for developmenttrue
foo.bar, test.com:8080
http://proxy.foo:8080
0.0.0.0
127.0.0.1
8080
X-ClientCert
true
to enable OIDC authorization code flowfalse
true
to enable OIDC back-channel logouttrue
(enabled), DEV_DSF_SERVER_AUTH_OIDC_CLIENT_ID and DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT_PATH to be specifiedfalse
/back-channel-logout
true
to enable OIDC bearer token authenticationfalse
/run/secrets/oidc_provider_client_certificate.pem
/run/secrets/oidc_provider_client_certificate_private_key.pem
/run/secrets/oidc_provider_client_certificate_private_key.pem.password
5000
30000
/run/secrets/oidc_provider_trust_certificates.pem
https://keycloak.test.com:8443/realms/example-realm-name
/run/secrets/app_client_trust_certificates.pem
bpe
docker image: /bpe
, default in fhir
docker image: /fhir
127.0.0.1
10000
/run/secrets/app_client_certificate.pem
/run/secrets/app_client_certificate.pem
/run/secrets/app_client_certificate_private_key.pem
/run/secrets/app_client_certificate_private_key.pem.password
2000
10000
/run/secrets/app_client_trust_certificates.pem
ca/server_cert_root_cas.pem
true
false
true
false
2
/run/secrets/db_liquibase.password
liquibase_user
jdbc:postgresql://db/fhir
fhir_users
/run/secrets/db_user.password
fhir_permanent_delete_users
/run/secrets/db_user_permanent_delete.password
fhir_server_permanent_delete_user
fhir_server_user
true
false
true
false
true
false
https://foo.bar/fhir
conf/bundle.xml
hospital.com
certtool --fingerprint --hash=sha512 --infile=client_certificate.pem
20
false
false
for developmenttrue
dev
, test
and prod
environments im configured; supported values: dev
, test
and prod
foo.bar, test.com:8080
http://proxy.foo:8080
0.0.0.0
127.0.0.1
8080
X-ClientCert
true
to enable OIDC authorization code flowfalse
true
to enable OIDC back-channel logouttrue
(enabled), DEV_DSF_SERVER_AUTH_OIDC_CLIENT_ID and DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT_PATH to be specifiedfalse
/back-channel-logout
true
to enable OIDC bearer token authenticationfalse
/run/secrets/oidc_provider_client_certificate.pem
/run/secrets/oidc_provider_client_certificate_private_key.pem
/run/secrets/oidc_provider_client_certificate_private_key.pem.password
5000
30000
/run/secrets/oidc_provider_trust_certificates.pem
ca/server_cert_root_cas.pem
https://keycloak.test.com:8443/realms/example-realm-name
/run/secrets/app_client_trust_certificates.pem
ca/client_cert_ca_chains.pem
bpe
docker image: /bpe
, default in fhir
docker image: /fhir
127.0.0.1
10000
/run/secrets/app_client_certificate.pem
0.0.0.0
127.0.0.1
8080
X-ClientCert
true
to enable OIDC authorization code flowfalse
true
to enable OIDC back-channel logouttrue
(enabled), DEV_DSF_SERVER_AUTH_OIDC_CLIENT_ID and DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT_PATH to be specifiedfalse
/back-channel-logout
true
to enable OIDC bearer token authenticationfalse
/run/secrets/oidc_provider_client_certificate.pem
/run/secrets/oidc_provider_client_certificate_private_key.pem
/run/secrets/oidc_provider_client_certificate_private_key.pem.password
5000
30000
/run/secrets/oidc_provider_trust_certificates.pem
https://keycloak.test.com:8443/realms/example-realm-name
/run/secrets/app_client_trust_certificates.pem
bpe
docker image: /bpe
, default in fhir
docker image: /fhir
127.0.0.1
10000
true
false
2
/run/secrets/db_liquibase.password
liquibase_user
jdbc:postgresql://db/bpe
camunda_users
/run/secrets/db_user_camunda.password
camunda_server_user
bpe_users
/run/secrets/db_user.password
bpe_server_user
true
.false
true
.false
true
.false
true
.false
/run/secrets/app_client_certificate.pem
/run/secrets/app_client_certificate_private_key.pem
/run/secrets/app_client_certificate_private_key.pem.password
2000
60000
true
false
5000
60000
true
false
/run/secrets/app_client_trust_certificates.pem
?criteria=QuestionnaireResponse%3Fstatus%3Dcompleted&status=active&type=websocket&payload=application/fhir%2Bjson
https://foo.bar/fhir
-1
means infinite number of retries-1
5000
?criteria=Task%3Fstatus%3Drequested&status=active&type=websocket&payload=application/fhir%2Bjson
/run/secrets/smtp_server_client_certificate.pem
/run/secrets/smtp_server_client_certificate_private_key.pem
/run/secrets/smtp_server_client_certificate_private_key.pem.password
sender@localhost
smtp.server.de
4
/opt/bpe/log/bpe.log
465
0
reply.to@localhost
true
. Requires SMTP server to be configured.false
true
. Requires SMTP server to be configured.false
/run/secrets/smime_certificate.p12
/run/secrets/smime_certificate.p12.password
recipient@localhost
cc.recipient@localhost
/run/secrets/smtp_server_trust_certificates.pem
true
false
4
10
40
dsfdev_updateAllowList|1.0, another_process|x.y
-1
means infinite number of retries-1
5000
process
old_process|x.y
<= 0
means number of cpu cores-1
foo.bar, test.com:8080
http://proxy.foo:8080
0.0.0.0
127.0.0.1
app
, 172.28.1.3
443
my-external.fqdn:443
30
seconds30
seconds60
seconds60
seconds/
character at start, no /
character at end, use ''
(empty string) to configure root as context path./fhir
SSLCACertificateFile
/run/secrets/ssl_ca_certificate_file.pem
Acceptable client certificate CA names
send to the client, during TLS handshake, sets the apache httpd parameter SSLCADNRequestFile
; if omitted all entries from SSL_CA_CERTIFICATE_FILE
are used/run/secrets/ssl_ca_dn_request_file.pem
SSLCertificateChainFile
; can be omitted if either no chain is needed (aka self signed server certificate) or the file specified via SSL_CERTIFICATE_FILE
contains the certificate chain/run/secrets/ssl_certificate_chain_file.pem
SSLCertificateFile
/run/secrets/ssl_certificate_file.pem
SSLCertificateKeyFile
/run/secrets/ssl_certificate_key_file.pem
SSLVerifyClient
optional
when using OIDC authenticationrequire
app
, 172.28.1.3
app
, 172.28.1.3
443
my-external.fqdn:443
30
seconds30
seconds60
seconds60
seconds/
character at start, no /
character at end, use ''
(empty string) to configure root as context path./fhir
SSLCACertificateFile
/run/secrets/ssl_ca_certificate_file.pem
Acceptable client certificate CA names
send to the client, during TLS handshake, sets the apache httpd parameter SSLCADNRequestFile
; if omitted all entries from SSL_CA_CERTIFICATE_FILE
are used/run/secrets/ssl_ca_dn_request_file.pem
SSLCertificateChainFile
; can be omitted if either no chain is needed (aka self signed server certificate) or the file specified via SSL_CERTIFICATE_FILE
contains the certificate chain/run/secrets/ssl_certificate_chain_file.pem
SSLCertificateFile
/run/secrets/ssl_certificate_file.pem
SSLCertificateKeyFile
/run/secrets/ssl_certificate_key_file.pem
SSLVerifyClient
optional
when using OIDC authenticationrequire
app
, 172.28.1.3
true
false
2
/run/secrets/db_liquibase.password
liquibase_user
jdbc:postgresql://db/bpe
camunda_users
/run/secrets/db_user_camunda.password
camunda_server_user
bpe_users
/run/secrets/db_user.password
bpe_server_user
true
false
true
false
true
false
true
false
true
false
true
false
true
false
/run/secrets/app_client_certificate.pem
/run/secrets/app_client_certificate_private_key.pem
/run/secrets/app_client_certificate_private_key.pem.password
2000
60000
true
false
5000
60000
true
false
/run/secrets/app_client_trust_certificates.pem
ca/server_cert_root_cas.pem
?criteria=QuestionnaireResponse%3Fstatus%3Dcompleted&status=active&type=websocket&payload=application/fhir%2Bjson
https://foo.bar/fhir
-1
means infinite number of retries-1
5000
?criteria=Task%3Fstatus%3Drequested&status=active&type=websocket&payload=application/fhir%2Bjson
/run/secrets/smtp_server_client_certificate.pem
/run/secrets/smtp_server_client_certificate_private_key.pem
/run/secrets/smtp_server_client_certificate_private_key.pem.password
sender@localhost
smtp.server.de
4
/opt/bpe/log/bpe.log
465
0
reply.to@localhost
true
; requires SMTP server to be configuredfalse
true
; requires SMTP server to be configuredfalse
/run/secrets/smime_certificate.p12
/run/secrets/smime_certificate.p12.password
recipient@localhost
cc.recipient@localhost
/run/secrets/smtp_server_trust_certificates.pem
ca/server_cert_root_cas.pem
true
false
4
10
40
dsfdev_updateAllowList|1.0, another_process|x.y
-1
means infinite number of retries-1
5000
process
old_process|x.y
<= 0
means number of cpu cores-1
https://foo.bar/bpe
https://localhost/bpe
false
false
for developmenttrue
dev
, test
and prod
environments im configured; supported values: dev
, test
and prod
foo.bar, test.com:8080
http://proxy.foo:8080
0.0.0.0
127.0.0.1
8080
X-ClientCert
true
to enable OIDC authorization code flowfalse
true
to enable OIDC back-channel logouttrue
(enabled), DEV_DSF_SERVER_AUTH_OIDC_CLIENT_ID and DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT_PATH to be specifiedfalse
/back-channel-logout
true
to enable OIDC bearer token authenticationfalse
/run/secrets/oidc_provider_client_certificate.pem
/run/secrets/oidc_provider_client_certificate_private_key.pem
/run/secrets/oidc_provider_client_certificate_private_key.pem.password
5000
30000
/run/secrets/oidc_provider_trust_certificates.pem
ca/server_cert_root_cas.pem
https://keycloak.test.com:8443/realms/example-realm-name
/run/secrets/app_client_trust_certificates.pem
ca/client_cert_ca_chains.pem
bpe
docker image: /bpe
, default in fhir
docker image: /fhir
127.0.0.1
10000
true
false
app
, 172.28.1.3
443
my-external.fqdn:443
30
seconds30
seconds60
seconds60
seconds/
character at start, no /
character at end, use ''
(empty string) to configure root as context path./fhir
SSLCACertificateFile
/run/secrets/ssl_ca_certificate_file.pem
Acceptable client certificate CA names
send to the client, during TLS handshake, sets the apache httpd parameter SSLCADNRequestFile
; if omitted all entries from SSL_CA_CERTIFICATE_FILE
are used/run/secrets/ssl_ca_dn_request_file.pem
SSLCertificateChainFile
; can be omitted if either no chain is needed (aka self signed server certificate) or the file specified via SSL_CERTIFICATE_FILE
contains the certificate chain/run/secrets/ssl_certificate_chain_file.pem
SSLCertificateFile
/run/secrets/ssl_certificate_file.pem
SSLCertificateKeyFile
/run/secrets/ssl_certificate_key_file.pem
SSLVerifyClient
optional
when using OIDC authenticationrequire
app
, 172.28.1.3
app
, 172.28.1.3
443
my-external.fqdn:443
30
seconds30
seconds60
seconds60
secondsSSLCACertificateFile
/run/secrets/ssl_ca_certificate_file.pem
Acceptable client certificate CA names
send to the client, during TLS handshake, sets the apache httpd parameter SSLCADNRequestFile
; if omitted all entries from SSL_CA_CERTIFICATE_FILE
are used/run/secrets/ssl_ca_dn_request_file.pem
SSLCertificateChainFile
; can be omitted if either no chain is needed (aka self signed server certificate) or the file specified via SSL_CERTIFICATE_FILE
contains the certificate chain/run/secrets/ssl_certificate_chain_file.pem
SSLCertificateFile
/run/secrets/ssl_certificate_file.pem
SSLCertificateKeyFile
/run/secrets/ssl_certificate_key_file.pem
SSLVerifyClient
optional
when using OIDC authenticationrequire
app
, 172.28.1.3
app
, 172.28.1.3
443
my-external.fqdn:443
30
seconds30
seconds60
seconds60
seconds/
character at start, no /
character at end, use ''
(empty string) to configure root as context path./bpe
SSLCACertificateFile
/run/secrets/ssl_ca_certificate_file.pem
Acceptable client certificate CA names
send to the client, during TLS handshake, sets the apache httpd parameter SSLCADNRequestFile
; if omitted all entries from SSL_CA_CERTIFICATE_FILE
are used/run/secrets/ssl_ca_dn_request_file.pem
SSLCertificateChainFile
; can be omitted if either no chain is needed (aka self signed server certificate) or the file specified via SSL_CERTIFICATE_FILE
contains the certificate chain/run/secrets/ssl_certificate_chain_file.pem
SSLCertificateFile
/run/secrets/ssl_certificate_file.pem
SSLCertificateKeyFile
/run/secrets/ssl_certificate_key_file.pem
SSLVerifyClient
optional
when using OIDC authenticationrequire
app
, 172.28.1.3
app
, 172.28.1.3
443
my-external.fqdn:443
30
seconds30
seconds60
seconds60
seconds/
character at start, no /
character at end, use ''
(empty string) to configure root as context path/fhir
SSLCACertificateFile
ca/client_cert_ca_chains.pem
Acceptable client certificate CA names
send to the client, during TLS handshake, sets the apache httpd parameter SSLCADNRequestFile
; if omitted all entries from SSL_CA_CERTIFICATE_FILE
are usedca/client_cert_issuing_cas.pem
SSLCertificateChainFile
; can be omitted if either no chain is needed (self signed server certificate) or the file specified via SSL_CERTIFICATE_FILE
contains the certificate chain/run/secrets/ssl_certificate_chain_file.pem
SSLCertificateFile
, may contain all certificates between the server certificate and the root ca certificate (excluding the root ca certificate). Omit SSL_CERTIFICATE_CHAIN_FILE
if chain included/run/secrets/ssl_certificate_file.pem
SSLCertificateKeyFile
/run/secrets/ssl_certificate_key_file.pem
C
values, must be a comma-separated list of strings in single quotation marks, e.g. 'DE', 'FR'
. If a client certificate with a not configured subject country C
value is used, the server answers with a 403 Forbidden
status code'DE'
CN
values, must be a comma-separated list of strings in single quotation marks. If a client certificate from a not configured issuing ca common-name is used, the server answers with a 403 Forbidden
status code'GEANT TLS ECC 1', 'HARICA OV TLS ECC', 'GEANT TLS RSA 1', 'HARICA OV TLS RSA', 'GEANT S/MIME ECC 1', 'HARICA S/MIME ECC', 'GEANT S/MIME RSA 1', 'HARICA S/MIME RSA', 'DFN-Verein Global Issuing CA', 'Fraunhofer User CA - G02', 'D-TRUST SSL Class 3 CA 1 2009', 'Sectigo RSA Organization Validation Secure Server CA', 'GEANT OV RSA CA 4', 'GEANT Personal CA 4', 'GEANT eScience Personal CA 4', 'Sectigo ECC Organization Validation Secure Server CA', 'GEANT OV ECC CA 4', 'GEANT Personal ECC CA 4', 'GEANT eScience Personal ECC CA 4', 'D-TRUST Limited Basic CA 1-2 2019', 'D-TRUST Limited Basic CA 1-3 2019'
SSLVerifyClient
optional
when using OIDC authenticationrequire
app
, 172.28.1.3
0.0.0.0
127.0.0.1
8080
X-ClientCert
true
to enable OIDC authorization code flowfalse
true
to enable OIDC back-channel logouttrue
(enabled), DEV_DSF_SERVER_AUTH_OIDC_CLIENT_ID and DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT_PATH to be specifiedfalse
/back-channel-logout
true
to enable OIDC bearer token authenticationfalse
/run/secrets/oidc_provider_client_certificate.pem
/run/secrets/oidc_provider_client_certificate_private_key.pem
/run/secrets/oidc_provider_client_certificate_private_key.pem.password
5000
30000
/run/secrets/oidc_provider_trust_certificates.pem
https://keycloak.test.com:8443/realms/example-realm-name
/run/secrets/app_client_trust_certificates.pem
bpe
docker image: /bpe
, default in fhir
docker image: /fhir
127.0.0.1
10000
true
false
2
/run/secrets/db_liquibase.password
liquibase_user
jdbc:postgresql://db/bpe
camunda_users
/run/secrets/db_user_camunda.password
camunda_server_user
bpe_users
/run/secrets/db_user.password
bpe_server_user
true
.false
true
.false
true
.false
true
.false
/run/secrets/app_client_certificate.pem
/run/secrets/app_client_certificate_private_key.pem
/run/secrets/app_client_certificate_private_key.pem.password
2000
60000
true
false
5000
60000
true
false
/run/secrets/app_client_trust_certificates.pem
?criteria=QuestionnaireResponse%3Fstatus%3Dcompleted&status=active&type=websocket&payload=application/fhir%2Bjson
https://foo.bar/fhir
-1
means infinite number of retries-1
5000
?criteria=Task%3Fstatus%3Drequested&status=active&type=websocket&payload=application/fhir%2Bjson
/run/secrets/smtp_server_client_certificate.pem
/run/secrets/smtp_server_client_certificate_private_key.pem
/run/secrets/smtp_server_client_certificate_private_key.pem.password
sender@localhost
smtp.server.de
4
/opt/bpe/log/bpe.log
465
0
reply.to@localhost
true
. Requires SMTP server to be configured.false
true
. Requires SMTP server to be configured.false
/run/secrets/smime_certificate.p12
/run/secrets/smime_certificate.p12.password
recipient@localhost
cc.recipient@localhost
/run/secrets/smtp_server_trust_certificates.pem
true
false
4
10
40
dsfdev_updateAllowList|1.0, another_process|x.y
-1
means infinite number of retries-1
5000
process
old_process|x.y
<= 0
means number of cpu cores-1
foo.bar, test.com:8080
http://proxy.foo:8080
0.0.0.0
127.0.0.1
/run/secrets/app_client_certificate.pem
/run/secrets/app_client_certificate_private_key.pem
/run/secrets/app_client_certificate_private_key.pem.password
2000
10000
/run/secrets/app_client _trust_certificates.pem
true
false
true
false
2
/run/secrets/db_liquibase.password
liquibase_user
jdbc:postgresql://db/fhir
fhir_users
/run/secrets/db_user.password
fhir_permanent_delete_users
/run/secrets/db_user_permanent_delete.password
fhir_server_permanent_delete_user
fhir_server_user
https://foo.bar/fhir
conf/bundle.xml
hospital.com
certtool --fingerprint --hash=sha512 --infile=client_certificate.pem
20
false
false
for developmenttrue
foo.bar, test.com:8080
http://proxy.foo:8080
0.0.0.0
127.0.0.1
8080
X-ClientCert
true
to enable OIDC authorization code flowfalse
true
to enable OIDC back-channel logouttrue
(enabled), DEV_DSF_SERVER_AUTH_OIDC_CLIENT_ID and DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT_PATH to be specifiedfalse
/back-channel-logout
true
to enable OIDC bearer token authenticationfalse
/run/secrets/oidc_provider_client_certificate.pem
/run/secrets/oidc_provider_client_certificate_private_key.pem
/run/secrets/oidc_provider_client_certificate_private_key.pem.password
5000
30000
/run/secrets/oidc_provider_trust_certificates.pem
https://keycloak.test.com:8443/realms/example-realm-name
/run/secrets/app_client_trust_certificates.pem
bpe
docker image: /bpe
, default in fhir
docker image: /fhir
127.0.0.1
10000
/run/secrets/app_client_certificate.pem
Feel free to contact us via E-Mail (dsf-gecko@hs-heilbronn.de) and we will take care of your request as soon as possible.
DSF's community is growing and we welcome anyone who would like to join! 🚀
Feel free to contact us via E-Mail (dsf-gecko@hs-heilbronn.de) and we will take care of your request as soon as possible.
"}');export{b as comp,g as data}; diff --git a/assets/create.html-B1E91RlH.js b/assets/create.html-B1E91RlH.js new file mode 100644 index 000000000..420192dd2 --- /dev/null +++ b/assets/create.html-B1E91RlH.js @@ -0,0 +1 @@ +import{_ as l}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as s,b as t,d as r,e as n,f as a,r as i,o as p}from"./app-BIWb5uIp.js";const d={},h={class:"hint-container tip"};function u(g,e){const o=i("RouteLink");return p(),s("div",null,[t("p",null,[e[1]||(e[1]=r("Visit the ")),n(o,{to:"/for-you/"},{default:a(()=>e[0]||(e[0]=[r("how the DSF can help you")])),_:1}),e[2]||(e[2]=r(" page to get started."))]),t("div",h,[e[6]||(e[6]=t("p",{class:"hint-container-title"},"Work in progress",-1)),t("p",null,[e[4]||(e[4]=r("We are currently in the process of updating the written documentation on how to create a DSF process plugin. In the meantime we recommend to study the oldstable version of the ")),n(o,{to:"/oldstable/tutorial/"},{default:a(()=>e[3]||(e[3]=[r("process plugin tutorial")])),_:1}),e[5]||(e[5]=r(". 
Additionally, we recommend to take a look at the upgraded DSF processes here:"))]),e[7]||(e[7]=t("ul",null,[t("li",null,[r("The "),t("a",{href:"https://github.com/datasharingframework/dsf-process-hello-world",target:"_blank",rel:"noopener noreferrer"},"hello world plugin")]),t("li",null,[r("The "),t("a",{href:"https://github.com/datasharingframework/dsf-process-ping-pong",target:"_blank",rel:"noopener noreferrer"},"ping pong plugin")]),t("li",null,[r("The "),t("a",{href:"https://github.com/datasharingframework/dsf-process-allow-list",target:"_blank",rel:"noopener noreferrer"},"update allowlist plugin")])],-1))])])}const c=l(d,[["render",u],["__file","create.html.vue"]]),w=JSON.parse('{"path":"/v1.6.0/develop/create.html","title":"Create a new process plugin","lang":"en-US","frontmatter":{"title":"Create a new process plugin","icon":"code","gitInclude":[]},"headers":[],"readingTime":{"minutes":0.34,"words":102},"filePathRelative":"v1.6.0/develop/create.md","excerpt":"Visit the how the DSF can help you page to get started.
\\nWork in progress
\\nWe are currently in the process of updating the written documentation on how to create a DSF process plugin. In the meantime we recommend to study the oldstable version of the process plugin tutorial. Additionally, we recommend to take a look at the upgraded DSF processes here:
\\nVisit the how the DSF can help you page to get started.
\\nWork in progress
\\nWe are currently in the process of updating the written documentation on how to create a DSF process plugin. In the meantime we recommend to study the oldstable version of the process plugin tutorial. Additionally, we recommend to take a look at the upgraded DSF processes here:
\\nWork in progress
\\nWe are currently in the process of updating the written documentation on how to create a DSF process plugin. In the meantime we recommend to study the oldstable version of the process plugin tutorial. Additionally, we recommend to take a look at the upgraded DSF processes here:
\\nVisit the how the DSF can help you page to get started.
\\nWork in progress
\\nWe are currently in the process of updating the written documentation on how to create a DSF process plugin. In the meantime we recommend to study the oldstable version of the process plugin tutorial. Additionally, we recommend to take a look at the upgraded DSF processes here:
\\nVisit the how the DSF can help you page to get started.
\\nWork in progress
\\nWe are currently in the process of updating the written documentation on how to create a DSF process plugin. In the meantime we recommend to study the oldstable version of the process plugin tutorial. Additionally, we recommend to take a look at the upgraded DSF processes here:
\\nVisit the how the DSF can help you page to get started.
\\nWork in progress
\\nWe are currently in the process of updating the written documentation on how to create a DSF process plugin. In the meantime we recommend to study the oldstable version of the process plugin tutorial. Additionally, we recommend to take a look at the upgraded DSF processes here:
\\nVisit the how the DSF can help you page to get started.
\\nWork in progress
\\nWe are currently in the process of updating the written documentation on how to create a DSF process plugin. In the meantime we recommend to study the oldstable version of the process plugin tutorial. Additionally, we recommend to take a look at the upgraded DSF processes here:
\\nVisit the how the DSF can help you page to get started.
\\nWork in progress
\\nWe are currently in the process of updating the written documentation on how to create a DSF process plugin. In the meantime we recommend to study the oldstable version of the process plugin tutorial. Additionally, we recommend to take a look at the upgraded DSF processes here:
\\nWork in progress
\\nWe are currently in the process of updating the written documentation on how to create a DSF process plugin. In the meantime we recommend to study the oldstable version of the process plugin tutorial. Additionally, we recommend to take a look at the upgraded DSF processes here:
\\nWork in progress
\\nWe are currently in the process of updating the written documentation on how to create a DSF process plugin. In the meantime we recommend to study the oldstable version of the process plugin tutorial. Additionally, we recommend to take a look at the upgraded DSF processes here:
\\nWork in progress
\\nWe are currently in the process of updating the written documentation on how to create a DSF process plugin. In the meantime we recommend to study the oldstable version of the process plugin tutorial. Additionally, we recommend to take a look at the upgraded DSF processes here:
\\nWork in progress
\\nWe are currently in the process of updating the written documentation on how to create a DSF process plugin. In the meantime we recommend to study the oldstable version of the process plugin tutorial. Additionally, we recommend to take a look at the upgraded DSF processes here:
\\nWork in progress
\\nWe are currently in the process of updating the written documentation on how to create a DSF process plugin. In the meantime we recommend to study the oldstable version of the process plugin tutorial. Additionally, we recommend to take a look at the upgraded DSF processes here:
\\nJoin us in enhancing our documentation!
We believe in the power of community collaboration to make our documentation clearer, more comprehensive, and more user-friendly. There are several ways you can contribute, and we welcome greatly your input!
We're excited to see your suggestions and are grateful for every contribution that helps us improve. Let's build better documentation together!
',4)]))}const l=t(n,[["render",i],["__file","documentation.html.vue"]]),h=JSON.parse(`{"path":"/v1.5.2/contribute/documentation.html","title":"Contribute documentation","lang":"en-US","frontmatter":{"title":"Contribute documentation","icon":"info","gitInclude":[]},"headers":[],"readingTime":{"minutes":0.74,"words":222},"filePathRelative":"v1.5.2/contribute/documentation.md","excerpt":"Join us in enhancing our documentation!
\\nWe believe in the power of community collaboration to make our documentation clearer, more comprehensive, and more user-friendly. There are several ways you can contribute, and we welcome greatly your input!
\\nJoin us in enhancing our documentation!
We believe in the power of community collaboration to make our documentation clearer, more comprehensive, and more user-friendly. There are several ways you can contribute, and we welcome greatly your input!
We're excited to see your suggestions and are grateful for every contribution that helps us improve. Let's build better documentation together!
',4)]))}const l=t(n,[["render",i],["__file","documentation.html.vue"]]),h=JSON.parse(`{"path":"/stable/contribute/documentation.html","title":"Contribute documentation","lang":"en-US","frontmatter":{"title":"Contribute documentation","icon":"info","gitInclude":[]},"headers":[],"readingTime":{"minutes":0.74,"words":222},"filePathRelative":"stable/contribute/documentation.md","excerpt":"Join us in enhancing our documentation!
\\nWe believe in the power of community collaboration to make our documentation clearer, more comprehensive, and more user-friendly. There are several ways you can contribute, and we welcome greatly your input!
\\nJoin us in enhancing our documentation!
We believe in the power of community collaboration to make our documentation clearer, more comprehensive, and more user-friendly. There are several ways you can contribute, and we welcome greatly your input!
We're excited to see your suggestions and are grateful for every contribution that helps us improve. Let's build better documentation together!
',4)]))}const l=t(n,[["render",i],["__file","documentation.html.vue"]]),h=JSON.parse(`{"path":"/v1.5.1/contribute/documentation.html","title":"Contribute documentation","lang":"en-US","frontmatter":{"title":"Contribute documentation","icon":"info","gitInclude":[]},"headers":[],"readingTime":{"minutes":0.74,"words":222},"filePathRelative":"v1.5.1/contribute/documentation.md","excerpt":"Join us in enhancing our documentation!
\\nWe believe in the power of community collaboration to make our documentation clearer, more comprehensive, and more user-friendly. There are several ways you can contribute, and we welcome greatly your input!
\\nJoin us in enhancing our documentation!
We believe in the power of community collaboration to make our documentation clearer, more comprehensive, and more user-friendly. There are several ways you can contribute, and we welcome greatly your input!
We're excited to see your suggestions and are grateful for every contribution that helps us improve. Let's build better documentation together!
',4)]))}const l=t(n,[["render",i],["__file","documentation.html.vue"]]),h=JSON.parse(`{"path":"/v1.5.0/contribute/documentation.html","title":"Contribute documentation","lang":"en-US","frontmatter":{"title":"Contribute documentation","icon":"info","gitInclude":[]},"headers":[],"readingTime":{"minutes":0.74,"words":222},"filePathRelative":"v1.5.0/contribute/documentation.md","excerpt":"Join us in enhancing our documentation!
\\nWe believe in the power of community collaboration to make our documentation clearer, more comprehensive, and more user-friendly. There are several ways you can contribute, and we welcome greatly your input!
\\nJoin us in enhancing our documentation!
We believe in the power of community collaboration to make our documentation clearer, more comprehensive, and more user-friendly. There are several ways you can contribute, and we welcome greatly your input!
We're excited to see your suggestions and are grateful for every contribution that helps us improve. Let's build better documentation together!
',4)]))}const l=t(n,[["render",i],["__file","documentation.html.vue"]]),h=JSON.parse(`{"path":"/v1.4.0/contribute/documentation.html","title":"Contribute documentation","lang":"en-US","frontmatter":{"title":"Contribute documentation","icon":"info","gitInclude":[]},"headers":[],"readingTime":{"minutes":0.74,"words":222},"filePathRelative":"v1.4.0/contribute/documentation.md","excerpt":"Join us in enhancing our documentation!
\\nWe believe in the power of community collaboration to make our documentation clearer, more comprehensive, and more user-friendly. There are several ways you can contribute, and we welcome greatly your input!
\\nJoin us in enhancing our documentation!
We believe in the power of community collaboration to make our documentation clearer, more comprehensive, and more user-friendly. There are several ways you can contribute, and we welcome greatly your input!
We're excited to see your suggestions and are grateful for every contribution that helps us improve. Let's build better documentation together!
',4)]))}const l=t(n,[["render",i],["__file","documentation.html.vue"]]),h=JSON.parse(`{"path":"/v1.6.0/contribute/documentation.html","title":"Contribute documentation","lang":"en-US","frontmatter":{"title":"Contribute documentation","icon":"info","gitInclude":[]},"headers":[],"readingTime":{"minutes":0.74,"words":222},"filePathRelative":"v1.6.0/contribute/documentation.md","excerpt":"Join us in enhancing our documentation!
\\nWe believe in the power of community collaboration to make our documentation clearer, more comprehensive, and more user-friendly. There are several ways you can contribute, and we welcome greatly your input!
\\nJoin us in enhancing our documentation!
We believe in the power of community collaboration to make our documentation clearer, more comprehensive, and more user-friendly. There are several ways you can contribute, and we welcome greatly your input!
We're excited to see your suggestions and are grateful for every contribution that helps us improve. Let's build better documentation together!
',4)]))}const l=t(n,[["render",i],["__file","documentation.html.vue"]]),h=JSON.parse(`{"path":"/v1.7.0/contribute/documentation.html","title":"Contribute documentation","lang":"en-US","frontmatter":{"title":"Contribute documentation","icon":"info","gitInclude":[]},"headers":[],"readingTime":{"minutes":0.74,"words":222},"filePathRelative":"v1.7.0/contribute/documentation.md","excerpt":"Join us in enhancing our documentation!
\\nWe believe in the power of community collaboration to make our documentation clearer, more comprehensive, and more user-friendly. There are several ways you can contribute, and we welcome greatly your input!
\\n<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<profiles version="15">
+ <profile kind="CodeFormatterProfile" name="highmed_dsf" version="15">
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_after_ellipsis" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_enum_declarations" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_allocation_expression" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_at_in_annotation_type_declaration" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.parentheses_positions_in_for_statment" value="common_lines"/>
+ <setting id="org.eclipse.jdt.core.formatter.comment.new_lines_at_block_boundaries" value="true"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_constructor_declaration_parameters" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.comment.insert_new_line_for_parameter" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_package" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.parentheses_positions_in_method_invocation" value="common_lines"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_enum_constant" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.blank_lines_after_imports" value="1"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_while" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.comment.insert_new_line_before_root_tags" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_annotation_type_member_declaration" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_method_declaration_throws" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.parentheses_positions_in_switch_statement" value="common_lines"/>
+ <setting id="org.eclipse.jdt.core.formatter.comment.format_javadoc_comments" value="true"/>
+ <setting id="org.eclipse.jdt.core.formatter.indentation.size" value="4"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_after_postfix_operator" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.parentheses_positions_in_enum_constant_declaration" value="common_lines"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_for_increments" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_type_arguments" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_for_inits" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_after_semicolon_in_for" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.align_with_spaces" value="false"/>
+ <setting id="org.eclipse.jdt.core.formatter.disabling_tag" value="@formatter:off"/>
+ <setting id="org.eclipse.jdt.core.formatter.continuation_indentation" value="2"/>
+ <setting id="org.eclipse.jdt.core.formatter.alignment_for_enum_constants" value="0"/>
+ <setting id="org.eclipse.jdt.core.formatter.blank_lines_before_imports" value="1"/>
+ <setting id="org.eclipse.jdt.core.formatter.blank_lines_after_package" value="1"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_after_binary_operator" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_multiple_local_declarations" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.parentheses_positions_in_if_while_statement" value="common_lines"/>
+ <setting id="org.eclipse.jdt.core.formatter.alignment_for_arguments_in_enum_constant" value="16"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_angle_bracket_in_parameterized_type_reference" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.comment.indent_root_tags" value="true"/>
+ <setting id="org.eclipse.jdt.core.formatter.wrap_before_or_operator_multicatch" value="true"/>
+ <setting id="org.eclipse.jdt.core.formatter.enabling_tag" value="@formatter:on"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_after_closing_brace_in_block" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.comment.count_line_length_from_starting_position" value="false"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_parenthesized_expression_in_return" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.alignment_for_throws_clause_in_method_declaration" value="16"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_parameter" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.keep_then_statement_on_same_line" value="false"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_field" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_explicitconstructorcall_arguments" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_after_prefix_operator" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.blank_lines_between_type_declarations" value="1"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_brace_in_array_initializer" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_for" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_catch" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_angle_bracket_in_type_arguments" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_method" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_switch" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.alignment_for_parameterized_type_references" value="0"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_anonymous_type_declaration" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_parenthesized_expression" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.keep_annotation_declaration_on_one_line" value="one_line_never"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_enum_constant" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.never_indent_line_comments_on_first_column" value="false"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_after_and_in_type_parameter" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_for_inits" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.indent_statements_compare_to_block" value="true"/>
+ <setting id="org.eclipse.jdt.core.formatter.brace_position_for_anonymous_type_declaration" value="next_line"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_question_in_wildcard" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_annotation" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_method_invocation_arguments" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_switch" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.comment.align_tags_descriptions_grouped" value="false"/>
+ <setting id="org.eclipse.jdt.core.formatter.comment.line_length" value="120"/>
+ <setting id="org.eclipse.jdt.core.formatter.use_on_off_tags" value="true"/>
+ <setting id="org.eclipse.jdt.core.formatter.keep_method_body_on_one_line" value="one_line_never"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_between_empty_brackets_in_array_allocation_expression" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.keep_loop_body_block_on_one_line" value="one_line_never"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_enum_constant" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_method_invocation" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_after_assignment_operator" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_type_declaration" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_for" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.comment.preserve_white_space_between_code_and_line_comments" value="false"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_local_variable" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.brace_position_for_method_declaration" value="next_line"/>
+ <setting id="org.eclipse.jdt.core.formatter.keep_enum_constant_declaration_on_one_line" value="one_line_never"/>
+ <setting id="org.eclipse.jdt.core.formatter.align_variable_declarations_on_columns" value="false"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_method_invocation" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.alignment_for_union_type_in_multicatch" value="16"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_after_colon_in_for" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.keep_type_declaration_on_one_line" value="one_line_never"/>
+ <setting id="org.eclipse.jdt.core.formatter.number_of_blank_lines_at_beginning_of_method_body" value="0"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_after_closing_angle_bracket_in_type_arguments" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.keep_else_statement_on_same_line" value="false"/>
+ <setting id="org.eclipse.jdt.core.formatter.alignment_for_binary_expression" value="16"/>
+ <setting id="org.eclipse.jdt.core.formatter.parentheses_positions_in_catch_clause" value="common_lines"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_parameterized_type_reference" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_array_initializer" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_multiple_field_declarations" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_annotation" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.alignment_for_arguments_in_explicit_constructor_call" value="16"/>
+ <setting id="org.eclipse.jdt.core.formatter.keep_anonymous_type_declaration_on_one_line" value="one_line_never"/>
+ <setting id="org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_annotation_declaration_header" value="true"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_superinterfaces" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_colon_in_default" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_after_question_in_conditional" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.brace_position_for_block" value="next_line"/>
+ <setting id="org.eclipse.jdt.core.formatter.brace_position_for_constructor_declaration" value="next_line"/>
+ <setting id="org.eclipse.jdt.core.formatter.brace_position_for_lambda_body" value="next_line"/>
+ <setting id="org.eclipse.jdt.core.formatter.compact_else_if" value="true"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_type_parameters" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_catch" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_method_invocation" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.put_empty_statement_on_new_line" value="true"/>
+ <setting id="org.eclipse.jdt.core.formatter.alignment_for_parameters_in_constructor_declaration" value="16"/>
+ <setting id="org.eclipse.jdt.core.formatter.alignment_for_type_parameters" value="0"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_method_invocation_arguments" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.alignment_for_arguments_in_method_invocation" value="16"/>
+ <setting id="org.eclipse.jdt.core.formatter.alignment_for_throws_clause_in_constructor_declaration" value="16"/>
+ <setting id="org.eclipse.jdt.core.formatter.alignment_for_compact_loops" value="16"/>
+ <setting id="org.eclipse.jdt.core.formatter.comment.clear_blank_lines_in_block_comment" value="false"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_new_line_before_catch_in_try_statement" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_try" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.keep_simple_for_body_on_same_line" value="false"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_new_line_at_end_of_file_if_missing" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.comment.clear_blank_lines_in_javadoc_comment" value="false"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_array_initializer" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_binary_operator" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_unary_operator" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.alignment_for_expressions_in_array_initializer" value="16"/>
+ <setting id="org.eclipse.jdt.core.formatter.format_line_comment_starting_on_first_column" value="true"/>
+ <setting id="org.eclipse.jdt.core.formatter.number_of_empty_lines_to_preserve" value="1"/>
+ <setting id="org.eclipse.jdt.core.formatter.parentheses_positions_in_annotation" value="common_lines"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_after_colon_in_case" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_ellipsis" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_semicolon_in_try_resources" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_after_colon_in_assert" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_if" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_type_arguments" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_and_in_type_parameter" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_parenthesized_expression" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.comment.format_line_comments" value="true"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_after_colon_in_labeled_statement" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.align_type_members_on_columns" value="false"/>
+ <setting id="org.eclipse.jdt.core.formatter.alignment_for_assignment" value="0"/>
+ <setting id="org.eclipse.jdt.core.formatter.alignment_for_module_statements" value="16"/>
+ <setting id="org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_type_header" value="true"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_method_declaration" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.comment.align_tags_names_descriptions" value="false"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_enum_constant" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.alignment_for_superinterfaces_in_type_declaration" value="16"/>
+ <setting id="org.eclipse.jdt.core.formatter.keep_if_then_body_block_on_one_line" value="one_line_never"/>
+ <setting id="org.eclipse.jdt.core.formatter.blank_lines_before_first_class_body_declaration" value="0"/>
+ <setting id="org.eclipse.jdt.core.formatter.alignment_for_conditional_expression" value="80"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_new_line_before_closing_brace_in_array_initializer" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_constructor_declaration_parameters" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.format_guardian_clause_on_one_line" value="false"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_if" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.align_assignment_statements_on_columns" value="false"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_type" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_block" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.brace_position_for_enum_declaration" value="next_line"/>
+ <setting id="org.eclipse.jdt.core.formatter.brace_position_for_block_in_case" value="next_line"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_constructor_declaration" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.comment.format_header" value="false"/>
+ <setting id="org.eclipse.jdt.core.formatter.alignment_for_arguments_in_allocation_expression" value="16"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_method_invocation" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_while" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_switch" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.alignment_for_method_declaration" value="0"/>
+ <setting id="org.eclipse.jdt.core.formatter.join_wrapped_lines" value="true"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_constructor_declaration" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.wrap_before_conditional_operator" value="true"/>
+ <setting id="org.eclipse.jdt.core.formatter.indent_switchstatements_compare_to_cases" value="true"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_bracket_in_array_allocation_expression" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_synchronized" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.align_fields_grouping_blank_lines" value="2147483647"/>
+ <setting id="org.eclipse.jdt.core.formatter.comment.new_lines_at_javadoc_boundaries" value="true"/>
+ <setting id="org.eclipse.jdt.core.formatter.brace_position_for_annotation_type_declaration" value="next_line"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_colon_in_for" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.alignment_for_resources_in_try" value="80"/>
+ <setting id="org.eclipse.jdt.core.formatter.use_tabs_only_for_leading_indentations" value="false"/>
+ <setting id="org.eclipse.jdt.core.formatter.parentheses_positions_in_try_clause" value="common_lines"/>
+ <setting id="org.eclipse.jdt.core.formatter.alignment_for_selector_in_method_invocation" value="16"/>
+ <setting id="org.eclipse.jdt.core.formatter.never_indent_block_comments_on_first_column" value="false"/>
+ <setting id="org.eclipse.jdt.core.formatter.keep_code_block_on_one_line" value="one_line_never"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_synchronized" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_constructor_declaration_throws" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.tabulation.size" value="4"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_allocation_expression" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_bracket_in_array_reference" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_after_colon_in_conditional" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.comment.format_source_code" value="true"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_array_initializer" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_try" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_after_semicolon_in_try_resources" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.blank_lines_before_field" value="0"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_after_at_in_annotation" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.continuation_indentation_for_array_initializer" value="2"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_after_question_in_wildcard" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.blank_lines_before_method" value="1"/>
+ <setting id="org.eclipse.jdt.core.formatter.alignment_for_superclass_in_type_declaration" value="16"/>
+ <setting id="org.eclipse.jdt.core.formatter.alignment_for_superinterfaces_in_enum_declaration" value="16"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_parenthesized_expression_in_throw" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.wrap_before_assignment_operator" value="false"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_colon_in_labeled_statement" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.brace_position_for_switch" value="next_line"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_superinterfaces" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_method_declaration_parameters" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_new_line_after_type_annotation" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_brace_in_array_initializer" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_parenthesized_expression" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.comment.format_html" value="true"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_after_at_in_annotation_type_declaration" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_after_closing_angle_bracket_in_type_parameters" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.parentheses_positions_in_method_delcaration" value="common_lines"/>
+ <setting id="org.eclipse.jdt.core.formatter.alignment_for_compact_if" value="16"/>
+ <setting id="org.eclipse.jdt.core.formatter.keep_lambda_body_block_on_one_line" value="one_line_never"/>
+ <setting id="org.eclipse.jdt.core.formatter.indent_empty_lines" value="false"/>
+ <setting id="org.eclipse.jdt.core.formatter.alignment_for_type_arguments" value="0"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_parameterized_type_reference" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_after_unary_operator" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_enum_constant" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.alignment_for_arguments_in_annotation" value="0"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_enum_declarations" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.keep_empty_array_initializer_on_one_line" value="false"/>
+ <setting id="org.eclipse.jdt.core.formatter.indent_switchstatements_compare_to_switch" value="true"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_new_line_before_else_in_if_statement" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_assignment_operator" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_constructor_declaration" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.blank_lines_before_new_chunk" value="1"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_new_line_after_label" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_enum_declaration_header" value="true"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_bracket_in_array_allocation_expression" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_constructor_declaration" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_colon_in_conditional" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_angle_bracket_in_parameterized_type_reference" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_method_declaration_parameters" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_angle_bracket_in_type_arguments" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_cast" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_colon_in_assert" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.blank_lines_before_member_type" value="1"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_new_line_before_while_in_do_statement" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_bracket_in_array_type_reference" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_angle_bracket_in_parameterized_type_reference" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.alignment_for_arguments_in_qualified_allocation_expression" value="16"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_new_line_after_opening_brace_in_array_initializer" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.indent_breaks_compare_to_cases" value="true"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_method_declaration" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_if" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_semicolon" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_postfix_operator" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_try" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_angle_bracket_in_type_arguments" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_cast" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.comment.format_block_comments" value="true"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_lambda_arrow" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_method_declaration" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.keep_imple_if_on_one_line" value="false"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_enum_declaration" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.alignment_for_parameters_in_method_declaration" value="16"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_between_brackets_in_array_type_reference" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_angle_bracket_in_type_parameters" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_semicolon_in_for" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_method_declaration_throws" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_bracket_in_array_allocation_expression" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.indent_statements_compare_to_body" value="true"/>
+ <setting id="org.eclipse.jdt.core.formatter.alignment_for_multiple_fields" value="16"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_enum_constant_arguments" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.keep_simple_while_body_on_same_line" value="false"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_prefix_operator" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.brace_position_for_array_initializer" value="end_of_line"/>
+ <setting id="org.eclipse.jdt.core.formatter.wrap_before_binary_operator" value="true"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_method_declaration" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_type_parameters" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_catch" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_bracket_in_array_reference" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_annotation" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_enum_constant_arguments" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.parentheses_positions_in_lambda_declaration" value="common_lines"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_between_empty_braces_in_array_initializer" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_colon_in_case" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_multiple_local_declarations" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.keep_simple_do_while_body_on_same_line" value="false"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_annotation_type_declaration" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_bracket_in_array_reference" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.keep_enum_declaration_on_one_line" value="one_line_never"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_method_declaration" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.wrap_outer_expressions_when_nested" value="true"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_after_closing_paren_in_cast" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.brace_position_for_enum_constant" value="next_line"/>
+ <setting id="org.eclipse.jdt.core.formatter.brace_position_for_type_declaration" value="next_line"/>
+ <setting id="org.eclipse.jdt.core.formatter.blank_lines_before_package" value="0"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_for" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_synchronized" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_for_increments" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_annotation_type_member_declaration" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.alignment_for_expressions_in_for_loop_header" value="0"/>
+ <setting id="org.eclipse.jdt.core.formatter.keep_simple_getter_setter_on_one_line" value="false"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_while" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_enum_constant" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_explicitconstructorcall_arguments" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_annotation" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_angle_bracket_in_type_parameters" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_enum_constant_header" value="true"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_after_lambda_arrow" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_constructor_declaration" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_constructor_declaration_throws" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.join_lines_in_comments" value="true"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_angle_bracket_in_type_parameters" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_question_in_conditional" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.comment.indent_parameter_description" value="true"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_new_line_before_finally_in_try_statement" value="insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.tabulation.char" value="tab"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_multiple_field_declarations" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.blank_lines_between_import_groups" value="1"/>
+ <setting id="org.eclipse.jdt.core.formatter.lineSplit" value="120"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_annotation" value="do not insert"/>
+ <setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_switch" value="insert"/>
+ </profile>
+</profiles>
<?xml version=\\"1.0\\" encoding=\\"UTF-8\\" standalone=\\"no\\"?>\\n<profiles version=\\"15\\">\\n <profile kind=\\"CodeFormatterProfile\\" name=\\"highmed_dsf\\" version=\\"15\\">\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_after_ellipsis\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_after_comma_in_enum_declarations\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_comma_in_allocation_expression\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_at_in_annotation_type_declaration\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.parentheses_positions_in_for_statment\\" value=\\"common_lines\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.comment.new_lines_at_block_boundaries\\" value=\\"true\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_after_comma_in_constructor_declaration_parameters\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.comment.insert_new_line_for_parameter\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_package\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.parentheses_positions_in_method_invocation\\" value=\\"common_lines\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_enum_constant\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.blank_lines_after_imports\\" value=\\"1\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_while\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.comment.insert_new_line_before_root_tags\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_annotation_type_member_declaration\\" value=\\"do not insert\\"/>\\n <setting 
id=\\"org.eclipse.jdt.core.formatter.insert_space_before_comma_in_method_declaration_throws\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.parentheses_positions_in_switch_statement\\" value=\\"common_lines\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.comment.format_javadoc_comments\\" value=\\"true\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.indentation.size\\" value=\\"4\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_after_postfix_operator\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.parentheses_positions_in_enum_constant_declaration\\" value=\\"common_lines\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_after_comma_in_for_increments\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_after_comma_in_type_arguments\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_comma_in_for_inits\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_after_semicolon_in_for\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.align_with_spaces\\" value=\\"false\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.disabling_tag\\" value=\\"@formatter:off\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.continuation_indentation\\" value=\\"2\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.alignment_for_enum_constants\\" value=\\"0\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.blank_lines_before_imports\\" value=\\"1\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.blank_lines_after_package\\" value=\\"1\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_after_binary_operator\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_after_comma_in_multiple_local_declarations\\" value=\\"insert\\"/>\\n <setting 
id=\\"org.eclipse.jdt.core.formatter.parentheses_positions_in_if_while_statement\\" value=\\"common_lines\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.alignment_for_arguments_in_enum_constant\\" value=\\"16\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_after_opening_angle_bracket_in_parameterized_type_reference\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.comment.indent_root_tags\\" value=\\"true\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.wrap_before_or_operator_multicatch\\" value=\\"true\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.enabling_tag\\" value=\\"@formatter:on\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_after_closing_brace_in_block\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.comment.count_line_length_from_starting_position\\" value=\\"false\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_parenthesized_expression_in_return\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.alignment_for_throws_clause_in_method_declaration\\" value=\\"16\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_parameter\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.keep_then_statement_on_same_line\\" value=\\"false\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_field\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_after_comma_in_explicitconstructorcall_arguments\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_after_prefix_operator\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.blank_lines_between_type_declarations\\" value=\\"1\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_closing_brace_in_array_initializer\\" value=\\"insert\\"/>\\n <setting 
id=\\"org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_for\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_catch\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_after_opening_angle_bracket_in_type_arguments\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_method\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_switch\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.alignment_for_parameterized_type_references\\" value=\\"0\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_anonymous_type_declaration\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_parenthesized_expression\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.keep_annotation_declaration_on_one_line\\" value=\\"one_line_never\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_enum_constant\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.never_indent_line_comments_on_first_column\\" value=\\"false\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_after_and_in_type_parameter\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_after_comma_in_for_inits\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.indent_statements_compare_to_block\\" value=\\"true\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.brace_position_for_anonymous_type_declaration\\" value=\\"next_line\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_question_in_wildcard\\" value=\\"do not insert\\"/>\\n <setting 
id=\\"org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_annotation\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_comma_in_method_invocation_arguments\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_switch\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.comment.align_tags_descriptions_grouped\\" value=\\"false\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.comment.line_length\\" value=\\"120\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.use_on_off_tags\\" value=\\"true\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.keep_method_body_on_one_line\\" value=\\"one_line_never\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_between_empty_brackets_in_array_allocation_expression\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.keep_loop_body_block_on_one_line\\" value=\\"one_line_never\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_enum_constant\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_method_invocation\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_after_assignment_operator\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_type_declaration\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_for\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.comment.preserve_white_space_between_code_and_line_comments\\" value=\\"false\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_local_variable\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.brace_position_for_method_declaration\\" 
value=\\"next_line\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.keep_enum_constant_declaration_on_one_line\\" value=\\"one_line_never\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.align_variable_declarations_on_columns\\" value=\\"false\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_method_invocation\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.alignment_for_union_type_in_multicatch\\" value=\\"16\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_after_colon_in_for\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.keep_type_declaration_on_one_line\\" value=\\"one_line_never\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.number_of_blank_lines_at_beginning_of_method_body\\" value=\\"0\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_after_closing_angle_bracket_in_type_arguments\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.keep_else_statement_on_same_line\\" value=\\"false\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.alignment_for_binary_expression\\" value=\\"16\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.parentheses_positions_in_catch_clause\\" value=\\"common_lines\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_after_comma_in_parameterized_type_reference\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_comma_in_array_initializer\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_after_comma_in_multiple_field_declarations\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_comma_in_annotation\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.alignment_for_arguments_in_explicit_constructor_call\\" value=\\"16\\"/>\\n <setting 
id=\\"org.eclipse.jdt.core.formatter.keep_anonymous_type_declaration_on_one_line\\" value=\\"one_line_never\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_annotation_declaration_header\\" value=\\"true\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_after_comma_in_superinterfaces\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_colon_in_default\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_after_question_in_conditional\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.brace_position_for_block\\" value=\\"next_line\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.brace_position_for_constructor_declaration\\" value=\\"next_line\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.brace_position_for_lambda_body\\" value=\\"next_line\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.compact_else_if\\" value=\\"true\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_comma_in_type_parameters\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_catch\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_method_invocation\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.put_empty_statement_on_new_line\\" value=\\"true\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.alignment_for_parameters_in_constructor_declaration\\" value=\\"16\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.alignment_for_type_parameters\\" value=\\"0\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_after_comma_in_method_invocation_arguments\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.alignment_for_arguments_in_method_invocation\\" value=\\"16\\"/>\\n <setting 
id=\\"org.eclipse.jdt.core.formatter.alignment_for_throws_clause_in_constructor_declaration\\" value=\\"16\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.alignment_for_compact_loops\\" value=\\"16\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.comment.clear_blank_lines_in_block_comment\\" value=\\"false\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_new_line_before_catch_in_try_statement\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_try\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.keep_simple_for_body_on_same_line\\" value=\\"false\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_new_line_at_end_of_file_if_missing\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.comment.clear_blank_lines_in_javadoc_comment\\" value=\\"false\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_after_comma_in_array_initializer\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_binary_operator\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_unary_operator\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.alignment_for_expressions_in_array_initializer\\" value=\\"16\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.format_line_comment_starting_on_first_column\\" value=\\"true\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.number_of_empty_lines_to_preserve\\" value=\\"1\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.parentheses_positions_in_annotation\\" value=\\"common_lines\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_after_colon_in_case\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_ellipsis\\" value=\\"do not insert\\"/>\\n <setting 
id=\\"org.eclipse.jdt.core.formatter.insert_space_before_semicolon_in_try_resources\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_after_colon_in_assert\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_if\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_comma_in_type_arguments\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_and_in_type_parameter\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_parenthesized_expression\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.comment.format_line_comments\\" value=\\"true\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_after_colon_in_labeled_statement\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.align_type_members_on_columns\\" value=\\"false\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.alignment_for_assignment\\" value=\\"0\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.alignment_for_module_statements\\" value=\\"16\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_type_header\\" value=\\"true\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_method_declaration\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.comment.align_tags_names_descriptions\\" value=\\"false\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_enum_constant\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.alignment_for_superinterfaces_in_type_declaration\\" value=\\"16\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.keep_if_then_body_block_on_one_line\\" value=\\"one_line_never\\"/>\\n <setting 
id=\\"org.eclipse.jdt.core.formatter.blank_lines_before_first_class_body_declaration\\" value=\\"0\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.alignment_for_conditional_expression\\" value=\\"80\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_new_line_before_closing_brace_in_array_initializer\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_comma_in_constructor_declaration_parameters\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.format_guardian_clause_on_one_line\\" value=\\"false\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_if\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.align_assignment_statements_on_columns\\" value=\\"false\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_type\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_block\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.brace_position_for_enum_declaration\\" value=\\"next_line\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.brace_position_for_block_in_case\\" value=\\"next_line\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_constructor_declaration\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.comment.format_header\\" value=\\"false\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.alignment_for_arguments_in_allocation_expression\\" value=\\"16\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_method_invocation\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_while\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_switch\\" value=\\"do not 
insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.alignment_for_method_declaration\\" value=\\"0\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.join_wrapped_lines\\" value=\\"true\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_constructor_declaration\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.wrap_before_conditional_operator\\" value=\\"true\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.indent_switchstatements_compare_to_cases\\" value=\\"true\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_closing_bracket_in_array_allocation_expression\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_synchronized\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.align_fields_grouping_blank_lines\\" value=\\"2147483647\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.comment.new_lines_at_javadoc_boundaries\\" value=\\"true\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.brace_position_for_annotation_type_declaration\\" value=\\"next_line\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_colon_in_for\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.alignment_for_resources_in_try\\" value=\\"80\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.use_tabs_only_for_leading_indentations\\" value=\\"false\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.parentheses_positions_in_try_clause\\" value=\\"common_lines\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.alignment_for_selector_in_method_invocation\\" value=\\"16\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.never_indent_block_comments_on_first_column\\" value=\\"false\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.keep_code_block_on_one_line\\" value=\\"one_line_never\\"/>\\n <setting 
id=\\"org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_synchronized\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_after_comma_in_constructor_declaration_throws\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.tabulation.size\\" value=\\"4\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_after_comma_in_allocation_expression\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_after_opening_bracket_in_array_reference\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_after_colon_in_conditional\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.comment.format_source_code\\" value=\\"true\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_array_initializer\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_try\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_after_semicolon_in_try_resources\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.blank_lines_before_field\\" value=\\"0\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_after_at_in_annotation\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.continuation_indentation_for_array_initializer\\" value=\\"2\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_after_question_in_wildcard\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.blank_lines_before_method\\" value=\\"1\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.alignment_for_superclass_in_type_declaration\\" value=\\"16\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.alignment_for_superinterfaces_in_enum_declaration\\" value=\\"16\\"/>\\n <setting 
id=\\"org.eclipse.jdt.core.formatter.insert_space_before_parenthesized_expression_in_throw\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.wrap_before_assignment_operator\\" value=\\"false\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_colon_in_labeled_statement\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.brace_position_for_switch\\" value=\\"next_line\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_comma_in_superinterfaces\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_after_comma_in_method_declaration_parameters\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_new_line_after_type_annotation\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_after_opening_brace_in_array_initializer\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_parenthesized_expression\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.comment.format_html\\" value=\\"true\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_after_at_in_annotation_type_declaration\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_after_closing_angle_bracket_in_type_parameters\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.parentheses_positions_in_method_delcaration\\" value=\\"common_lines\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.alignment_for_compact_if\\" value=\\"16\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.keep_lambda_body_block_on_one_line\\" value=\\"one_line_never\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.indent_empty_lines\\" value=\\"false\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.alignment_for_type_arguments\\" value=\\"0\\"/>\\n <setting 
id=\\"org.eclipse.jdt.core.formatter.insert_space_before_comma_in_parameterized_type_reference\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_after_unary_operator\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_enum_constant\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.alignment_for_arguments_in_annotation\\" value=\\"0\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_comma_in_enum_declarations\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.keep_empty_array_initializer_on_one_line\\" value=\\"false\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.indent_switchstatements_compare_to_switch\\" value=\\"true\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_new_line_before_else_in_if_statement\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_assignment_operator\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_constructor_declaration\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.blank_lines_before_new_chunk\\" value=\\"1\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_new_line_after_label\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_enum_declaration_header\\" value=\\"true\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_after_opening_bracket_in_array_allocation_expression\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_constructor_declaration\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_colon_in_conditional\\" value=\\"insert\\"/>\\n <setting 
id=\\"org.eclipse.jdt.core.formatter.insert_space_before_opening_angle_bracket_in_parameterized_type_reference\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_comma_in_method_declaration_parameters\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_closing_angle_bracket_in_type_arguments\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_cast\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_colon_in_assert\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.blank_lines_before_member_type\\" value=\\"1\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_new_line_before_while_in_do_statement\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_opening_bracket_in_array_type_reference\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_closing_angle_bracket_in_parameterized_type_reference\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.alignment_for_arguments_in_qualified_allocation_expression\\" value=\\"16\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_new_line_after_opening_brace_in_array_initializer\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.indent_breaks_compare_to_cases\\" value=\\"true\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_method_declaration\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_if\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_semicolon\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_postfix_operator\\" value=\\"do not 
insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_try\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_opening_angle_bracket_in_type_arguments\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_cast\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.comment.format_block_comments\\" value=\\"true\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_lambda_arrow\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_method_declaration\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.keep_imple_if_on_one_line\\" value=\\"false\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_enum_declaration\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.alignment_for_parameters_in_method_declaration\\" value=\\"16\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_between_brackets_in_array_type_reference\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_opening_angle_bracket_in_type_parameters\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_semicolon_in_for\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_after_comma_in_method_declaration_throws\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_opening_bracket_in_array_allocation_expression\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.indent_statements_compare_to_body\\" value=\\"true\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.alignment_for_multiple_fields\\" value=\\"16\\"/>\\n <setting 
id=\\"org.eclipse.jdt.core.formatter.insert_space_after_comma_in_enum_constant_arguments\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.keep_simple_while_body_on_same_line\\" value=\\"false\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_prefix_operator\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.brace_position_for_array_initializer\\" value=\\"end_of_line\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.wrap_before_binary_operator\\" value=\\"true\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_method_declaration\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_after_comma_in_type_parameters\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_catch\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_closing_bracket_in_array_reference\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_after_comma_in_annotation\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_comma_in_enum_constant_arguments\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.parentheses_positions_in_lambda_declaration\\" value=\\"common_lines\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_between_empty_braces_in_array_initializer\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_colon_in_case\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_comma_in_multiple_local_declarations\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.keep_simple_do_while_body_on_same_line\\" value=\\"false\\"/>\\n <setting 
id=\\"org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_annotation_type_declaration\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_opening_bracket_in_array_reference\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.keep_enum_declaration_on_one_line\\" value=\\"one_line_never\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_method_declaration\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.wrap_outer_expressions_when_nested\\" value=\\"true\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_after_closing_paren_in_cast\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.brace_position_for_enum_constant\\" value=\\"next_line\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.brace_position_for_type_declaration\\" value=\\"next_line\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.blank_lines_before_package\\" value=\\"0\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_for\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_synchronized\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_comma_in_for_increments\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_annotation_type_member_declaration\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.alignment_for_expressions_in_for_loop_header\\" value=\\"0\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.keep_simple_getter_setter_on_one_line\\" value=\\"false\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_while\\" value=\\"do not insert\\"/>\\n <setting 
id=\\"org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_enum_constant\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_comma_in_explicitconstructorcall_arguments\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_annotation\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_after_opening_angle_bracket_in_type_parameters\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_enum_constant_header\\" value=\\"true\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_after_lambda_arrow\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_constructor_declaration\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_comma_in_constructor_declaration_throws\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.join_lines_in_comments\\" value=\\"true\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_closing_angle_bracket_in_type_parameters\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_question_in_conditional\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.comment.indent_parameter_description\\" value=\\"true\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_new_line_before_finally_in_try_statement\\" value=\\"insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.tabulation.char\\" value=\\"tab\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_comma_in_multiple_field_declarations\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.blank_lines_between_import_groups\\" value=\\"1\\"/>\\n <setting 
id=\\"org.eclipse.jdt.core.formatter.lineSplit\\" value=\\"120\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_annotation\\" value=\\"do not insert\\"/>\\n <setting id=\\"org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_switch\\" value=\\"insert\\"/>\\n </profile>\\n</profiles>
\\nversion: '3.8'
+services:
+ proxy:
+ image: nginx:1.23
+ restart: "no"
+ ports:
+ - 127.0.0.1:443:443
+ secrets:
+ - proxy_certificate_and_int_cas.pem
+ - proxy_certificate_private_key.pem
+ - proxy_trusted_client_cas.pem
+ volumes:
+ - type: bind
+ source: ./proxy/conf.d
+ target: /etc/nginx/conf.d
+ read_only: true
+ - type: bind
+ source: ./proxy/nginx.conf
+ target: /etc/nginx/nginx.conf
+ read_only: true
+ networks:
+ dic-fhir-frontend:
+ ipv4_address: 172.20.0.66
+ hrp-fhir-frontend:
+ ipv4_address: 172.20.0.82
+ cos-fhir-frontend:
+ ipv4_address: 172.20.0.98
+ internet:
+ aliases:
+ - cos
+ - dic
+ - hrp
+ environment:
+ TZ: Europe/Berlin
+
+ db:
+ image: postgres:13
+ restart: "no"
+ healthcheck:
+ test: ["CMD-SHELL", "pg_isready -U liquibase_user -d postgres"]
+ interval: 10s
+ timeout: 5s
+ retries: 5
+ environment:
+ TZ: Europe/Berlin
+ POSTGRES_PASSWORD_FILE: /run/secrets/db_liquibase.password
+ POSTGRES_USER: liquibase_user
+ POSTGRES_DB: postgres
+ networks:
+ - cos-fhir-backend
+ - dic-fhir-backend
+ - hrp-fhir-backend
+ - cos-bpe-backend
+ - dic-bpe-backend
+ - hrp-bpe-backend
+ secrets:
+ - db_liquibase.password
+ volumes:
+ - type: volume
+ source: db-data
+ target: /var/lib/postgresql/data
+ - type: bind
+ source: ./db/init-db.sh
+ target: /docker-entrypoint-initdb.d/init-db.sh
+ read_only: true
+
+ cos-fhir:
+ image: ghcr.io/highmed/fhir:0.7.0
+ restart: "no"
+ ports:
+ - 127.0.0.1:5002:5002
+ secrets:
+ - db_liquibase.password
+ - db_cos_fhir_user.password
+ - db_cos_fhir_user_permanent_delete.password
+ - app_client_trust_certificates.pem
+ - app_cos_client_certificate.pem
+ - app_cos_client_certificate_private_key.pem
+ - app_client_certificate_private_key.pem.password
+ volumes:
+ - type: bind
+ source: ./cos/fhir/conf/bundle.xml
+ target: /opt/fhir/conf/bundle.xml
+ - type: bind
+ source: ./cos/fhir/log
+ target: /opt/fhir/log
+ environment:
+ TZ: Europe/Berlin
+ EXTRA_JVM_ARGS: -agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=*:5002
+ ORG_HIGHMED_DSF_FHIR_DB_LIQUIBASE_PASSWORD_FILE: /run/secrets/db_liquibase.password
+ ORG_HIGHMED_DSF_FHIR_DB_USER_PASSWORD_FILE: /run/secrets/db_cos_fhir_user.password
+ ORG_HIGHMED_DSF_FHIR_DB_USER_PERMANENT_DELETE_PASSWORD_FILE: /run/secrets/db_cos_fhir_user_permanent_delete.password
+ ORG_HIGHMED_DSF_FHIR_CLIENT_TRUST_CERTIFICATES: /run/secrets/app_client_trust_certificates.pem
+ ORG_HIGHMED_DSF_FHIR_CLIENT_CERTIFICATE: /run/secrets/app_cos_client_certificate.pem
+ ORG_HIGHMED_DSF_FHIR_CLIENT_CERTIFICATE_PRIVATE_KEY: /run/secrets/app_cos_client_certificate_private_key.pem
+ ORG_HIGHMED_DSF_FHIR_CLIENT_CERTIFICATE_PRIVATE_KEY_PASSWORD_FILE: /run/secrets/app_client_certificate_private_key.pem.password
+ ORG_HIGHMED_DSF_FHIR_DB_URL: jdbc:postgresql://db/cos_fhir
+ ORG_HIGHMED_DSF_FHIR_DB_USER_GROUP: cos_fhir_users
+ ORG_HIGHMED_DSF_FHIR_DB_USER_USERNAME: cos_fhir_server_user
+ ORG_HIGHMED_DSF_FHIR_DB_USER_PERMANENT_DELETE_GROUP: cos_fhir_permanent_delete_users
+ ORG_HIGHMED_DSF_FHIR_DB_USER_PERMANENT_DELETE_USERNAME: cos_fhir_server_permanent_delete_user
+ ORG_HIGHMED_DSF_FHIR_SERVER_BASE_URL: https://cos/fhir
+ ORG_HIGHMED_DSF_FHIR_SERVER_ORGANIZATION_IDENTIFIER_VALUE: Test_COS
+ ORG_HIGHMED_DSF_FHIR_SERVER_USER_THUMBPRINTS: \${COS_USER_THUMBPRINTS}
+ ORG_HIGHMED_DSF_FHIR_SERVER_USER_THUMBPRINTS_PERMANENT_DELETE: \${COS_USER_THUMBPRINTS_PERMANENT_DELETE}
+ networks:
+ cos-fhir-frontend:
+ ipv4_address: 172.20.0.99
+ cos-fhir-backend:
+ internet:
+ depends_on:
+ - db
+ - proxy
+ cos-bpe:
+ image: ghcr.io/highmed/bpe:0.7.0
+ restart: "no"
+ ports:
+ - 127.0.0.1:5005:5005
+ secrets:
+ - db_liquibase.password
+ - db_cos_bpe_user.password
+ - db_cos_bpe_user_camunda.password
+ - app_client_trust_certificates.pem
+ - app_cos_client_certificate.pem
+ - app_cos_client_certificate_private_key.pem
+ - app_client_certificate_private_key.pem.password
+ volumes:
+ - type: bind
+ source: ./cos/bpe/plugin
+ target: /opt/bpe/plugin
+ read_only: true
+ - type: bind
+ source: ./cos/bpe/process
+ target: /opt/bpe/process
+ read_only: true
+ - type: bind
+ source: ./cos/bpe/log
+ target: /opt/bpe/log
+ - type: bind
+ source: ./cos/bpe/last_event
+ target: /opt/bpe/last_event
+ environment:
+ TZ: Europe/Berlin
+ EXTRA_JVM_ARGS: -agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=*:5005
+ ORG_HIGHMED_DSF_BPE_DB_LIQUIBASE_PASSWORD_FILE: /run/secrets/db_liquibase.password
+ ORG_HIGHMED_DSF_BPE_DB_USER_PASSWORD_FILE: /run/secrets/db_cos_bpe_user.password
+ ORG_HIGHMED_DSF_BPE_DB_USER_CAMUNDA_PASSWORD_FILE: /run/secrets/db_cos_bpe_user_camunda.password
+ ORG_HIGHMED_DSF_BPE_FHIR_CLIENT_TRUST_CERTIFICATES: /run/secrets/app_client_trust_certificates.pem
+ ORG_HIGHMED_DSF_BPE_FHIR_CLIENT_CERTIFICATE: /run/secrets/app_cos_client_certificate.pem
+ ORG_HIGHMED_DSF_BPE_FHIR_CLIENT_CERTIFICATE_PRIVATE_KEY: /run/secrets/app_cos_client_certificate_private_key.pem
+ ORG_HIGHMED_DSF_BPE_FHIR_CLIENT_CERTIFICATE_PRIVATE_KEY_PASSWORD_FILE: /run/secrets/app_client_certificate_private_key.pem.password
+ ORG_HIGHMED_DSF_BPE_DB_URL: jdbc:postgresql://db/cos_bpe
+ ORG_HIGHMED_DSF_BPE_DB_USER_GROUP: cos_bpe_users
+ ORG_HIGHMED_DSF_BPE_DB_USER_USERNAME: cos_bpe_server_user
+ ORG_HIGHMED_DSF_BPE_DB_USER_CAMUNDA_GROUP: cos_camunda_users
+ ORG_HIGHMED_DSF_BPE_DB_USER_CAMUNDA_USERNAME: cos_camunda_server_user
+ ORG_HIGHMED_DSF_BPE_FHIR_SERVER_ORGANIZATION_IDENTIFIER_VALUE: Test_COS
+ ORG_HIGHMED_DSF_BPE_FHIR_SERVER_BASE_URL: https://cos/fhir
+ networks:
+ cos-bpe-frontend:
+ cos-bpe-backend:
+ internet:
+ depends_on:
+ - db
+ - cos-fhir
+
+ dic-fhir:
+ image: ghcr.io/highmed/fhir:0.7.0
+ restart: "no"
+ ports:
+ - 127.0.0.1:5000:5000
+ secrets:
+ - db_liquibase.password
+ - db_dic_fhir_user.password
+ - db_dic_fhir_user_permanent_delete.password
+ - app_client_trust_certificates.pem
+ - app_dic_client_certificate.pem
+ - app_dic_client_certificate_private_key.pem
+ - app_client_certificate_private_key.pem.password
+ volumes:
+ - type: bind
+ source: ./dic/fhir/conf/bundle.xml
+ target: /opt/fhir/conf/bundle.xml
+ - type: bind
+ source: ./dic/fhir/log
+ target: /opt/fhir/log
+ environment:
+ TZ: Europe/Berlin
+ EXTRA_JVM_ARGS: -agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=*:5000
+ ORG_HIGHMED_DSF_FHIR_DB_LIQUIBASE_PASSWORD_FILE: /run/secrets/db_liquibase.password
+ ORG_HIGHMED_DSF_FHIR_DB_USER_PASSWORD_FILE: /run/secrets/db_dic_fhir_user.password
+ ORG_HIGHMED_DSF_FHIR_DB_USER_PERMANENT_DELETE_PASSWORD_FILE: /run/secrets/db_dic_fhir_user_permanent_delete.password
+ ORG_HIGHMED_DSF_FHIR_CLIENT_TRUST_CERTIFICATES: /run/secrets/app_client_trust_certificates.pem
+ ORG_HIGHMED_DSF_FHIR_CLIENT_CERTIFICATE: /run/secrets/app_dic_client_certificate.pem
+ ORG_HIGHMED_DSF_FHIR_CLIENT_CERTIFICATE_PRIVATE_KEY: /run/secrets/app_dic_client_certificate_private_key.pem
+ ORG_HIGHMED_DSF_FHIR_CLIENT_CERTIFICATE_PRIVATE_KEY_PASSWORD_FILE: /run/secrets/app_client_certificate_private_key.pem.password
+ ORG_HIGHMED_DSF_FHIR_DB_URL: jdbc:postgresql://db/dic_fhir
+ ORG_HIGHMED_DSF_FHIR_DB_USER_GROUP: dic_fhir_users
+ ORG_HIGHMED_DSF_FHIR_DB_USER_USERNAME: dic_fhir_server_user
+ ORG_HIGHMED_DSF_FHIR_DB_USER_PERMANENT_DELETE_GROUP: dic_fhir_permanent_delete_users
+ ORG_HIGHMED_DSF_FHIR_DB_USER_PERMANENT_DELETE_USERNAME: dic_fhir_server_permanent_delete_user
+ ORG_HIGHMED_DSF_FHIR_SERVER_BASE_URL: https://dic/fhir
+ ORG_HIGHMED_DSF_FHIR_SERVER_ORGANIZATION_IDENTIFIER_VALUE: Test_DIC
+ ORG_HIGHMED_DSF_FHIR_SERVER_USER_THUMBPRINTS: \${DIC_USER_THUMBPRINTS}
+ ORG_HIGHMED_DSF_FHIR_SERVER_USER_THUMBPRINTS_PERMANENT_DELETE: \${DIC_USER_THUMBPRINTS_PERMANENT_DELETE}
+ networks:
+ dic-fhir-frontend:
+ ipv4_address: 172.20.0.67
+ dic-fhir-backend:
+ internet:
+ depends_on:
+ - db
+ - proxy
+ dic-bpe:
+ image: ghcr.io/highmed/bpe:0.7.0
+ restart: "no"
+ ports:
+ - 127.0.0.1:5003:5003
+ secrets:
+ - db_liquibase.password
+ - db_dic_bpe_user.password
+ - db_dic_bpe_user_camunda.password
+ - app_client_trust_certificates.pem
+ - app_dic_client_certificate.pem
+ - app_dic_client_certificate_private_key.pem
+ - app_client_certificate_private_key.pem.password
+ volumes:
+ - type: bind
+ source: ./dic/bpe/plugin
+ target: /opt/bpe/plugin
+ read_only: true
+ - type: bind
+ source: ./dic/bpe/process
+ target: /opt/bpe/process
+ read_only: true
+ - type: bind
+ source: ./dic/bpe/log
+ target: /opt/bpe/log
+ - type: bind
+ source: ./dic/bpe/last_event
+ target: /opt/bpe/last_event
+ environment:
+ TZ: Europe/Berlin
+ EXTRA_JVM_ARGS: -agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=*:5003
+ ORG_HIGHMED_DSF_BPE_DB_LIQUIBASE_PASSWORD_FILE: /run/secrets/db_liquibase.password
+ ORG_HIGHMED_DSF_BPE_DB_USER_PASSWORD_FILE: /run/secrets/db_dic_bpe_user.password
+ ORG_HIGHMED_DSF_BPE_DB_USER_CAMUNDA_PASSWORD_FILE: /run/secrets/db_dic_bpe_user_camunda.password
+ ORG_HIGHMED_DSF_BPE_FHIR_CLIENT_TRUST_CERTIFICATES: /run/secrets/app_client_trust_certificates.pem
+ ORG_HIGHMED_DSF_BPE_FHIR_CLIENT_CERTIFICATE: /run/secrets/app_dic_client_certificate.pem
+ ORG_HIGHMED_DSF_BPE_FHIR_CLIENT_CERTIFICATE_PRIVATE_KEY: /run/secrets/app_dic_client_certificate_private_key.pem
+ ORG_HIGHMED_DSF_BPE_FHIR_CLIENT_CERTIFICATE_PRIVATE_KEY_PASSWORD_FILE: /run/secrets/app_client_certificate_private_key.pem.password
+ ORG_HIGHMED_DSF_BPE_DB_URL: jdbc:postgresql://db/dic_bpe
+ ORG_HIGHMED_DSF_BPE_DB_USER_GROUP: dic_bpe_users
+ ORG_HIGHMED_DSF_BPE_DB_USER_USERNAME: dic_bpe_server_user
+ ORG_HIGHMED_DSF_BPE_DB_USER_CAMUNDA_GROUP: dic_camunda_users
+ ORG_HIGHMED_DSF_BPE_DB_USER_CAMUNDA_USERNAME: dic_camunda_server_user
+ ORG_HIGHMED_DSF_BPE_FHIR_SERVER_ORGANIZATION_IDENTIFIER_VALUE: Test_DIC
+ ORG_HIGHMED_DSF_BPE_FHIR_SERVER_BASE_URL: https://dic/fhir
+ networks:
+ dic-bpe-frontend:
+ dic-bpe-backend:
+ internet:
+ depends_on:
+ - db
+ - dic-fhir
+
+ hrp-fhir:
+ image: ghcr.io/highmed/fhir:0.7.0
+ restart: "no"
+ ports:
+ - 127.0.0.1:5001:5001
+ secrets:
+ - db_liquibase.password
+ - db_hrp_fhir_user.password
+ - db_hrp_fhir_user_permanent_delete.password
+ - app_client_trust_certificates.pem
+ - app_hrp_client_certificate.pem
+ - app_hrp_client_certificate_private_key.pem
+ - app_client_certificate_private_key.pem.password
+ volumes:
+ - type: bind
+ source: ./hrp/fhir/conf/bundle.xml
+ target: /opt/fhir/conf/bundle.xml
+ - type: bind
+ source: ./hrp/fhir/log
+ target: /opt/fhir/log
+ environment:
+ TZ: Europe/Berlin
+ EXTRA_JVM_ARGS: -agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=*:5001
+ ORG_HIGHMED_DSF_FHIR_DB_LIQUIBASE_PASSWORD_FILE: /run/secrets/db_liquibase.password
+ ORG_HIGHMED_DSF_FHIR_DB_USER_PASSWORD_FILE: /run/secrets/db_hrp_fhir_user.password
+ ORG_HIGHMED_DSF_FHIR_DB_USER_PERMANENT_DELETE_PASSWORD_FILE: /run/secrets/db_hrp_fhir_user_permanent_delete.password
+ ORG_HIGHMED_DSF_FHIR_CLIENT_TRUST_CERTIFICATES: /run/secrets/app_client_trust_certificates.pem
+ ORG_HIGHMED_DSF_FHIR_CLIENT_CERTIFICATE: /run/secrets/app_hrp_client_certificate.pem
+ ORG_HIGHMED_DSF_FHIR_CLIENT_CERTIFICATE_PRIVATE_KEY: /run/secrets/app_hrp_client_certificate_private_key.pem
+ ORG_HIGHMED_DSF_FHIR_CLIENT_CERTIFICATE_PRIVATE_KEY_PASSWORD_FILE: /run/secrets/app_client_certificate_private_key.pem.password
+ ORG_HIGHMED_DSF_FHIR_DB_URL: jdbc:postgresql://db/hrp_fhir
+ ORG_HIGHMED_DSF_FHIR_DB_USER_GROUP: hrp_fhir_users
+ ORG_HIGHMED_DSF_FHIR_DB_USER_USERNAME: hrp_fhir_server_user
+ ORG_HIGHMED_DSF_FHIR_DB_USER_PERMANENT_DELETE_GROUP: hrp_fhir_permanent_delete_users
+ ORG_HIGHMED_DSF_FHIR_DB_USER_PERMANENT_DELETE_USERNAME: hrp_fhir_server_permanent_delete_user
+ ORG_HIGHMED_DSF_FHIR_SERVER_BASE_URL: https://hrp/fhir
+ ORG_HIGHMED_DSF_FHIR_SERVER_USER_THUMBPRINTS: \${HRP_USER_THUMBPRINTS}
+ ORG_HIGHMED_DSF_FHIR_SERVER_USER_THUMBPRINTS_PERMANENT_DELETE: \${HRP_USER_THUMBPRINTS_PERMANENT_DELETE}
+ ORG_HIGHMED_DSF_FHIR_SERVER_ORGANIZATION_IDENTIFIER_VALUE: Test_HRP
+ networks:
+ hrp-fhir-frontend:
+ ipv4_address: 172.20.0.83
+ hrp-fhir-backend:
+ internet:
+ depends_on:
+ - db
+ - proxy
+ hrp-bpe:
+ image: ghcr.io/highmed/bpe:0.7.0
+ restart: "no"
+ ports:
+ - 127.0.0.1:5004:5004
+ secrets:
+ - db_liquibase.password
+ - db_hrp_bpe_user.password
+ - db_hrp_bpe_user_camunda.password
+ - app_client_trust_certificates.pem
+ - app_hrp_client_certificate.pem
+ - app_hrp_client_certificate_private_key.pem
+ - app_client_certificate_private_key.pem.password
+ volumes:
+ - type: bind
+ source: ./hrp/bpe/plugin
+ target: /opt/bpe/plugin
+ read_only: true
+ - type: bind
+ source: ./hrp/bpe/process
+ target: /opt/bpe/process
+ read_only: true
+ - type: bind
+ source: ./hrp/bpe/log
+ target: /opt/bpe/log
+ - type: bind
+ source: ./hrp/bpe/last_event
+ target: /opt/bpe/last_event
+ environment:
+ TZ: Europe/Berlin
+ EXTRA_JVM_ARGS: -agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=*:5004
+ ORG_HIGHMED_DSF_BPE_DB_LIQUIBASE_PASSWORD_FILE: /run/secrets/db_liquibase.password
+ ORG_HIGHMED_DSF_BPE_DB_USER_PASSWORD_FILE: /run/secrets/db_hrp_bpe_user.password
+ ORG_HIGHMED_DSF_BPE_DB_USER_CAMUNDA_PASSWORD_FILE: /run/secrets/db_hrp_bpe_user_camunda.password
+ ORG_HIGHMED_DSF_BPE_FHIR_CLIENT_TRUST_CERTIFICATES: /run/secrets/app_client_trust_certificates.pem
+ ORG_HIGHMED_DSF_BPE_FHIR_CLIENT_CERTIFICATE: /run/secrets/app_hrp_client_certificate.pem
+ ORG_HIGHMED_DSF_BPE_FHIR_CLIENT_CERTIFICATE_PRIVATE_KEY: /run/secrets/app_hrp_client_certificate_private_key.pem
+ ORG_HIGHMED_DSF_BPE_FHIR_CLIENT_CERTIFICATE_PRIVATE_KEY_PASSWORD_FILE: /run/secrets/app_client_certificate_private_key.pem.password
+ ORG_HIGHMED_DSF_BPE_DB_URL: jdbc:postgresql://db/hrp_bpe
+ ORG_HIGHMED_DSF_BPE_DB_USER_GROUP: hrp_bpe_users
+ ORG_HIGHMED_DSF_BPE_DB_USER_USERNAME: hrp_bpe_server_user
+ ORG_HIGHMED_DSF_BPE_DB_USER_CAMUNDA_GROUP: hrp_camunda_users
+ ORG_HIGHMED_DSF_BPE_DB_USER_CAMUNDA_USERNAME: hrp_camunda_server_user
+ ORG_HIGHMED_DSF_BPE_FHIR_SERVER_ORGANIZATION_IDENTIFIER_VALUE: Test_HRP
+ ORG_HIGHMED_DSF_BPE_FHIR_SERVER_BASE_URL: https://hrp/fhir
+ networks:
+ hrp-bpe-frontend:
+ hrp-bpe-backend:
+ internet:
+ depends_on:
+ - db
+ - hrp-fhir
+
+secrets:
+ proxy_certificate_and_int_cas.pem:
+ file: ./secrets/proxy_certificate_and_int_cas.pem
+ proxy_certificate_private_key.pem:
+ file: ./secrets/proxy_certificate_private_key.pem
+ proxy_trusted_client_cas.pem:
+ file: ./secrets/proxy_trusted_client_cas.pem
+
+ db_liquibase.password:
+ file: ./secrets/db_liquibase.password
+
+ db_dic_fhir_user.password:
+ file: ./secrets/db_dic_fhir_user.password
+ db_dic_fhir_user_permanent_delete.password:
+ file: ./secrets/db_dic_fhir_user_permanent_delete.password
+ db_dic_bpe_user.password:
+ file: ./secrets/db_dic_bpe_user.password
+ db_dic_bpe_user_camunda.password:
+ file: ./secrets/db_dic_bpe_user_camunda.password
+
+ db_hrp_fhir_user.password:
+ file: ./secrets/db_hrp_fhir_user.password
+ db_hrp_fhir_user_permanent_delete.password:
+ file: ./secrets/db_hrp_fhir_user_permanent_delete.password
+ db_hrp_bpe_user.password:
+ file: ./secrets/db_hrp_bpe_user.password
+ db_hrp_bpe_user_camunda.password:
+ file: ./secrets/db_hrp_bpe_user_camunda.password
+
+ db_cos_fhir_user.password:
+ file: ./secrets/db_cos_fhir_user.password
+ db_cos_fhir_user_permanent_delete.password:
+ file: ./secrets/db_cos_fhir_user_permanent_delete.password
+ db_cos_bpe_user.password:
+ file: ./secrets/db_cos_bpe_user.password
+ db_cos_bpe_user_camunda.password:
+ file: ./secrets/db_cos_bpe_user_camunda.password
+
+ app_client_trust_certificates.pem:
+ file: ./secrets/app_client_trust_certificates.pem
+ app_client_certificate_private_key.pem.password:
+ file: ./secrets/app_client_certificate_private_key.pem.password
+
+ app_dic_client_certificate.pem:
+ file: ./secrets/app_dic_client_certificate.pem
+ app_dic_client_certificate_private_key.pem:
+ file: ./secrets/app_dic_client_certificate_private_key.pem
+
+ app_hrp_client_certificate.pem:
+ file: ./secrets/app_hrp_client_certificate.pem
+ app_hrp_client_certificate_private_key.pem:
+ file: ./secrets/app_hrp_client_certificate_private_key.pem
+
+ app_cos_client_certificate.pem:
+ file: ./secrets/app_cos_client_certificate.pem
+ app_cos_client_certificate_private_key.pem:
+ file: ./secrets/app_cos_client_certificate_private_key.pem
+
+networks:
+ internet:
+ dic-fhir-frontend:
+ driver: bridge
+ ipam:
+ driver: default
+ config:
+ - subnet: 172.20.0.64/28
+ dic-fhir-backend:
+ dic-bpe-frontend:
+ dic-bpe-backend:
+ hrp-fhir-frontend:
+ driver: bridge
+ ipam:
+ driver: default
+ config:
+ - subnet: 172.20.0.80/28
+ hrp-fhir-backend:
+ hrp-bpe-frontend:
+ hrp-bpe-backend:
+ cos-fhir-frontend:
+ driver: bridge
+ ipam:
+ driver: default
+ config:
+ - subnet: 172.20.0.96/28
+ cos-fhir-backend:
+ cos-bpe-frontend:
+ cos-bpe-backend:
+
+
+volumes:
+ db-data:
+ name: dsf-process-tutorial-db
Prerequisites | Exercise 1 | Exercise 1.1 | Exercise 2 | Exercise 3 | Exercise 4 | Exercise 5
"}');export{y as comp,E as data}; diff --git a/assets/exercise1-simpleProcess.html-BxXADBB1.js b/assets/exercise1-simpleProcess.html-BxXADBB1.js new file mode 100644 index 000000000..0708e030d --- /dev/null +++ b/assets/exercise1-simpleProcess.html-BxXADBB1.js @@ -0,0 +1 @@ +import{_ as n}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as l,b as o,e as r,f as a,d as t,a as i,r as c,o as d}from"./app-BIWb5uIp.js";const h={};function p(u,e){const s=c("RouteLink");return d(),l("div",null,[o("p",null,[r(s,{to:"/oldstable/tutorial/prerequisites.html"},{default:a(()=>e[0]||(e[0]=[t("Prerequisites")])),_:1}),e[6]||(e[6]=t(" | Exercise 1 | ")),r(s,{to:"/oldstable/tutorial/exercise11-processDebugging.html"},{default:a(()=>e[1]||(e[1]=[t("Exercise 1.1")])),_:1}),e[7]||(e[7]=t(" | ")),r(s,{to:"/oldstable/tutorial/exercise2-inputParameters.html"},{default:a(()=>e[2]||(e[2]=[t("Exercise 2")])),_:1}),e[8]||(e[8]=t(" | ")),r(s,{to:"/oldstable/tutorial/exercise3-messageEvents.html"},{default:a(()=>e[3]||(e[3]=[t("Exercise 3")])),_:1}),e[9]||(e[9]=t(" | ")),r(s,{to:"/oldstable/tutorial/exercise4-exclusiveGateways.html"},{default:a(()=>e[4]||(e[4]=[t("Exercise 4")])),_:1}),e[10]||(e[10]=t(" | ")),r(s,{to:"/oldstable/tutorial/exercise5-eventBasedGateways.html"},{default:a(()=>e[5]||(e[5]=[t("Exercise 5")])),_:1})]),e[32]||(e[32]=i('The first exercise focuses on setting up the testing environment used in this tutorial and shows how to implement and execute a simple BPMN process.
With this exercise we will take a look at the general setup of the tutorial code base, modify a service class and execute the service within a simple demo process.
The tutorial project consists of three parts: A test-data-generator
project used to generate X.509 certificates and FHIR resources during the maven build of the project. The certificates and FHIR resources are needed to start DSF instances simulating installations at three different organizations used for this tutorial. The DSF instances are configured using a docker-compose.yml
file in the test-setup
folder. The docker-compose test setup uses a single PostgreSQL database server, a single nginx reverse proxy as well as three separate DSF FHIR server and three separate DSF BPE server instances. The tutorial-process
project contains all resources (FHIR resources, BPMN process models and Java code) for the actual DSF process plugin.
Java code for the tutorial-process
project is located at src/main/java
, FHIR resources and BPMN process models at src/main/resources
as well as prepared JUnit tests to verify your solution at src/test/java
.
The most important Java class used to specify the process plugin for the DSF BPE server is a class that implements the org.highmed.dsf.bpe.ProcessPluginDefinition
interface from the DSF dsf-bpe-process-base module. The DSF BPE server searches for classes implementing this interface using the Java ServiceLoader mechanism. For this tutorial the TutorialProcessPluginDefinition
class implements this interface. It is appropriately specified in the src/main/resources/META-INF/services/org.highmed.dsf.bpe
.ProcessPluginDefinition file. The TutorialProcessPluginDefinition
class is used to specify name and version of the process plugin, what BPMN processes are to be deployed and what FHIR resources and required by the BPMN processes. For the implementation of service task and message events of the processes a special Spring context is used for every process plugin. The TutorialProcessPluginDefinition
class specifies what via Spring-Framework configuration class with Spring Beans are used for the process plugin specific Spring Context. For this plugin the TutorialConfig
cass is used to define Spring Beans.
The business process engine used by the DSF BPE server is based on the OpenSource Camunda Process Engine 7. In order to specify what Java code should be executed for a BPMN ServiceTask you need to specify the fully-qualified Java class name in the ServiceTask inside the BPMN model. To be executable the Java class needs to extend the org.highmed.dsf.bpe.delegate.AbstractServiceDelegate
from the DSF dsf-bpe-process-base module and the class needs to be defined as as Spring Bean.
Business process instances are started or the execution continued via FHIR Task resources. The Task resource specifies what process to instantiate or continue, what organization is requesting this action and what organization is the target for the request. When a Task resource starts a process we call it "leading", when it continues a process it's called "current". This differentiation is important for multi-instance use cases not covered by this tutorial. Each Java class extending the abstract class org.highmed.dsf.bpe.delegate.AbstractServiceDelegate
has methods to access both types of Task resources.
FHIR ActivityDefinition resources are used to announce what processes can be instantiated at a given DSF instance. These resources are used by the DSF to specify what profile the Task resource needs to conform to and what BPMN message name is used to correlate the appropriate start or intermediate event within the BPMN model. The ActivityDefinition also defines what kind of organization can request the instantiation or continuation of a process instance and what kind of organization are allowed to fulfill the request.
',15)),o("p",null,[e[13]||(e[13]=t("We will take a closer look as ")),e[14]||(e[14]=o("a",{href:"http://hl7.org/fhir/R4/activitydefinition.html",target:"_blank",rel:"noopener noreferrer"},"ActivityDefinition",-1)),e[15]||(e[15]=t(" resources in ")),r(s,{to:"/oldstable/tutorial/exercise3-messageEvents.html"},{default:a(()=>e[11]||(e[11]=[t("Exercise 3")])),_:1}),e[16]||(e[16]=t(" and ")),r(s,{to:"/oldstable/tutorial/exercise5-eventBasedGateways.html"},{default:a(()=>e[12]||(e[12]=[t("Exercise 5")])),_:1}),e[17]||(e[17]=t("."))]),e[33]||(e[33]=i('HelloDic#doExecute
method that logs the recipient organization identifier from the "leading" Task.HelloDic
class as a singleton bean in the TutorialConfig class.hello-dic.bpmn
process model.highmedorg_helloDic
process to only allow local clients to instantiate the process via a helloDic
message.Execute a maven build of the dsf-process-tutorial parent module via:
mvn clean install -Pexercise-1
Verify that the build was successful and no test failures occurred.
To verify the highmedorg_helloDic
process can be executed successfully, we need to deploy it into a DSF instance and execute the process. The maven install
build is configured to create a process jar file with all necessary resources and to copy the jar to the appropriate locations of the docker test setup.
Test_DIC
organization in a console at location .../dsf-process-tutorial/test-setup
:docker-compose up dic-fhir
Verify the DSF FHIR server started successfully. You can access the webservice of the DSF FHIR server at https://dic/fhir.
The DSF FHIR server uses a server certificate that was generated during the first maven install build. To authenticate yourself to the server you can use the client certificate located at .../dsf-process-tutorial/test-data-generator/cert/Webbrowser_Test_User/Webbrowser_Test_User_certificate.p12
(Password: password). Add the certificate and the generated Root CA to your browser certificate store.
Caution: If you add the generated Root CA to your browsers certificate store as a trusted Root CA, make sure you are the only one with access to the private key at .../dsf-process-tutorial/test-data-generator/cert/ca/testca_private-key.pem
.
Test_DIC
organization in a second console at location .../dsf-process-tutorial/test-setup
:docker-compose up dic-bpe
Verify the DSF BPE server started successfully and deployed the highmedorg_helloDic process. The DSF BPE server should print a message that the process was deployed. The DSF FHIR server should now have a new ActivityDefinition resource. Go to https://dic/fhir/ActivityDefinition to check if the expected resource was created by the BPE while deploying the process. The returned FHIR Bundle should contain a single ActivityDefinition. Also, go to https://dic/fhir/StructureDefinition?url=http://highmed.org/fhir/StructureDefinition/task-hello-dic to check if the expected Task profile was created.
highmedorg_helloDic
process by posting an appropriate FHIR Task resource to the DSF FHIR server:The Task resource is used to tell the DSF BPE server via the DSF FHIR server that a specific organization wants to start (or continue) one process instance at a specified organization. The needed Task resource can be generated and posted to the DSF FHIR server by executing the main
method of the org.highmed.dsf.process.tutorial.TutorialExampleStarter
class. For the TutorialExampleStarter to work the location of the client certificate and its password need to be specified:
.../dsf-process-tutorial/test-data-generator/cert/Webbrowser_Test_User/Webbrowser_Test_User_certificate.p12
), 2. password for the client certificate (password
)DSF_CLIENT_CERTIFICATE_PATH
and DSF_CLIENT_CERTIFICATE_PASSWORD
with the appropriate values.Verify that the FHIR Task resource could be created at the DSF FHIR server. The TutorialExampleStarter class should print a message HTTP 201: Created
showing that the Task resource was created.
Verify that the highmedorg_helloDic
process was executed by the DSF BPE server. The BPE server should print a message showing that the process was started, print the log message you added to the HelloDic
class and end with a message showing that the process finished.
Prerequisites | Exercise 1 | Exercise 1.1 | Exercise 2 | Exercise 3 | Exercise 4 | Exercise 5
"}');export{m as comp,b as data}; diff --git a/assets/exercise11-processDebugging.html-B5eMvcvL.js b/assets/exercise11-processDebugging.html-B5eMvcvL.js new file mode 100644 index 000000000..da82abb12 --- /dev/null +++ b/assets/exercise11-processDebugging.html-B5eMvcvL.js @@ -0,0 +1 @@ +import{_ as a}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as n,b as l,e as s,f as r,d as t,a as o,r as d,o as u}from"./app-BIWb5uIp.js";const p="/photos/guideline/tutorial/eclipse.png",g="/photos/guideline/tutorial/intelliJ.png",m={};function b(c,e){const i=d("RouteLink");return u(),n("div",null,[l("p",null,[s(i,{to:"/oldstable/tutorial/prerequisites.html"},{default:r(()=>e[0]||(e[0]=[t("Prerequisites")])),_:1}),e[6]||(e[6]=t(" | ")),s(i,{to:"/oldstable/tutorial/exercise1-simpleProcess.html"},{default:r(()=>e[1]||(e[1]=[t("Exercise 1")])),_:1}),e[7]||(e[7]=t(" | Exercise 1.1 | ")),s(i,{to:"/oldstable/tutorial/exercise2-inputParameters.html"},{default:r(()=>e[2]||(e[2]=[t("Exercise 2")])),_:1}),e[8]||(e[8]=t(" | ")),s(i,{to:"/oldstable/tutorial/exercise3-messageEvents.html"},{default:r(()=>e[3]||(e[3]=[t("Exercise 3")])),_:1}),e[9]||(e[9]=t(" | ")),s(i,{to:"/oldstable/tutorial/exercise4-exclusiveGateways.html"},{default:r(()=>e[4]||(e[4]=[t("Exercise 4")])),_:1}),e[10]||(e[10]=t(" | ")),s(i,{to:"/oldstable/tutorial/exercise5-eventBasedGateways.html"},{default:r(()=>e[5]||(e[5]=[t("Exercise 5")])),_:1})]),e[30]||(e[30]=o('This exercise looks at how to use the Java debugger of your IDE to remote debug the execution of a process plugin.
The DSF FHIR server and the DSF BPE server applications are written in Java and as such are execute on a headless JRE 11 within their docker containers. Command line arguments can be passed to the JVM inside the ghcr.io/highmed/fhir and ghcr.io/highmed/bpe docker images by specifying the environment variable EXTRA_JVM_ARGS
. This can be used for example to configure the minimum and maximum heap of the JVM; but can also be used to specify a remote debugging port, which we will use in this exercise.
Test_DIC
organization in a console at location .../dsf-process-tutorial/test-setup
:docker-compose up dic-fhir
Test_DIC
organization in second console at location .../dsf-process-tutorial/test-setup
:docker-compose up dic-bpe
Eclipse:
IntelliJ:
Create a debug breakpoint in the first line of the HelloDic
class doExecute
method.
Start your previously defined remote Java debugger in your IDE.
Execute the TutorialExampleStarter
class to start highmed_helloDic
process.
User your IDE's debugger to step thru the code of the HelloDic
class doExecute
method.
Prerequisites | Exercise 1 | Exercise 1.1 | Exercise 2 | Exercise 3 | Exercise 4 | Exercise 5
"}');export{f as comp,k as data}; diff --git a/assets/exercise2-inputParameters.html-BLlHrkE8.js b/assets/exercise2-inputParameters.html-BLlHrkE8.js new file mode 100644 index 000000000..7d2013dcc --- /dev/null +++ b/assets/exercise2-inputParameters.html-BLlHrkE8.js @@ -0,0 +1,6 @@ +import{_ as n}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as l,b as r,e as i,f as a,d as t,a as o,r as d,o as h}from"./app-BIWb5uIp.js";const p={},u={start:"3"};function g(m,e){const s=d("RouteLink");return h(),l("div",null,[r("p",null,[i(s,{to:"/oldstable/tutorial/prerequisites.html"},{default:a(()=>e[0]||(e[0]=[t("Prerequisites")])),_:1}),e[6]||(e[6]=t(" | ")),i(s,{to:"/oldstable/tutorial/exercise1-simpleProcess.html"},{default:a(()=>e[1]||(e[1]=[t("Exercise 1")])),_:1}),e[7]||(e[7]=t(" | ")),i(s,{to:"/oldstable/tutorial/exercise11-processDebugging.html"},{default:a(()=>e[2]||(e[2]=[t("Exercise 1.1")])),_:1}),e[8]||(e[8]=t(" | Exercise 2 | ")),i(s,{to:"/oldstable/tutorial/exercise3-messageEvents.html"},{default:a(()=>e[3]||(e[3]=[t("Exercise 3")])),_:1}),e[9]||(e[9]=t(" | ")),i(s,{to:"/oldstable/tutorial/exercise4-exclusiveGateways.html"},{default:a(()=>e[4]||(e[4]=[t("Exercise 4")])),_:1}),e[10]||(e[10]=t(" | ")),i(s,{to:"/oldstable/tutorial/exercise5-eventBasedGateways.html"},{default:a(()=>e[5]||(e[5]=[t("Exercise 5")])),_:1})]),e[66]||(e[66]=o('In order to configure processes that are packaged as process plugins, we will take a look at two possibilities on how to pass parameters to a process. The goal of this exercise is to enhance the highmedorg_helloDic
process by trying them both.
DSF process plugins can be configured with input parameters using two different approaches:
Environment variables are the same for all running process instances and allow static configuration of processes. They can be defined by adding a member variable having the Spring-Framework @Value annotation to the configuration class TutorialConfig
. The value of the annotation uses the ${..}
notation and follows the form ${some.property:defaultValue}
, where each dot in the property name corresponds to an underscore in the environment variable and environment variables are always written upper-case. The property some.property
therefore corresponds to the environment variable SOME_PROPERTY
.
To create an automated documentation of environment variables during the Maven build process, the DSF provided @ProcessDocumentation annotation from the package org.highmed.dsf.tools.generator
can be used. The pom.xml
of the tutorial-process
submodule calls the DSF provided DocumentGenerator class from the same package during the prepare-package phase of the build process. The generator searches for all @ProcessDocumentation annotations and generates a Markdown documentation based on the annotation's values in the target folder.
Providing input parameters to a specific process instance allows for dynamic configuration of process instances. It can be done by sending additional values as part of the Task resource that starts or continues a process instance. It should be noted that a FHIR profile must be created for each Task resource, i.e. for each message event in a process model, which inherits from the DSF Task Base Profile. This base profile defines three default input parameters:
message-name
(mandatory 1..1): the name of the BPMN message event, same as in the BPMN modelbusiness-key
(optional 0..1): used to identify process instancescorrelation-key
(optional 0..1): used to identify multi-instance process instances used for messaging multiple targetsA later exercise will examine these input parameters and their meaning in more detail.
',13)),r("p",null,[e[12]||(e[12]=t("Since input parameters of ")),e[13]||(e[13]=r("a",{href:"http://hl7.org/fhir/R4/task.html",target:"_blank",rel:"noopener noreferrer"},"Task",-1)),e[14]||(e[14]=t(" resources are identified by predefined codes, they are defined via FHIR ")),e[15]||(e[15]=r("a",{href:"http://hl7.org/fhir/R4/codesystem.html",target:"_blank",rel:"noopener noreferrer"},"CodeSystem",-1)),e[16]||(e[16]=t(" and ")),i(s,{to:"/oldstable/tutorial/hl7.org/fhir/R4/valueset.html"},{default:a(()=>e[11]||(e[11]=[t("ValueSet")])),_:1}),e[17]||(e[17]=t(" resources. The ")),e[18]||(e[18]=r("a",{href:"https://github.com/highmed/highmed-dsf/blob/main/dsf-fhir/dsf-fhir-validation/src/main/resources/fhir/CodeSystem/highmed-bpmn-message-0.5.0.xml",target:"_blank",rel:"noopener noreferrer"},"BPMN-Message CodeSystem",-1)),e[19]||(e[19]=t(" and the ")),e[20]||(e[20]=r("a",{href:"https://github.com/highmed/highmed-dsf/blob/main/dsf-fhir/dsf-fhir-validation/src/main/resources/fhir/ValueSet/highmed-bpmn-message-0.5.0.xml",target:"_blank",rel:"noopener noreferrer"},"BPMN-Message ValueSet",-1)),e[21]||(e[21]=t(" are used in the ")),e[22]||(e[22]=r("a",{href:"https://github.com/highmed/highmed-dsf/blob/main/dsf-fhir/dsf-fhir-validation/src/main/resources/fhir/StructureDefinition/highmed-task-base-0.5.0.xml",target:"_blank",rel:"noopener noreferrer"},"DSF Task Base Profile",-1)),e[23]||(e[23]=t(" to define the three default input parameters of ")),e[24]||(e[24]=r("a",{href:"http://hl7.org/fhir/R4/task.html",target:"_blank",rel:"noopener noreferrer"},"Task",-1)),e[25]||(e[25]=t(" resources."))]),e[67]||(e[67]=o(`To avoid the need to specify the version and release date for each CodeSystem, StructureDefinition (Task profile) and ValueSet resource, the placeholders #{version}
and #{date}
can be used. They are replaced with the values returned by the methods ProcessPluginDefinition#getVersion()
and ProcessPluginDefinition#getReleaseDate()
respectively during deployment of a process plugin by the DSF BPE server.
Read Access Tag
While writing FHIR resources on the DSF FHIR server is only allowed by the own organization (except Task), rules have to be defined for reading FHIR resources by external organizations (again except Task). The Resource.meta.tag
field is used for this purpose. To allow read access for all organizations (the standard for metadata resources), the following read-access-tag
value can be written into this field:
<meta>
+ <tag>
+ <system value="http://highmed.org/fhir/CodeSystem/read-access-tag" />
+ <code value="ALL" />
+ </tag>
+</meta>
The read access rules for Task resources are defined through the fields Task.requester
and Task.restriction.recipient
. Therefore, no read-access-tag
is needed.
It is also possible to restrict read access of FHIR resources to organizations with a specific role in a consortium or a specific identifier, but this is not covered in the tutorial.
`,6)),r("p",null,[e[28]||(e[28]=t("The write access rules for ")),e[29]||(e[29]=r("a",{href:"http://hl7.org/fhir/R4/task.html",target:"_blank",rel:"noopener noreferrer"},"Task",-1)),e[30]||(e[30]=t(" resources are defined through the ")),e[31]||(e[31]=r("a",{href:"http://hl7.org/fhir/R4/activitydefinition.html",target:"_blank",rel:"noopener noreferrer"},"ActivityDefinition",-1)),e[32]||(e[32]=t(" resources belonging to the process. We will take a look at this in ")),i(s,{to:"/oldstable/tutorial/exercise3-messageEvents.html"},{default:a(()=>e[26]||(e[26]=[t("exercise 3")])),_:1}),e[33]||(e[33]=t(" and ")),i(s,{to:"/oldstable/tutorial/exercise5-eventBasedGateways.html"},{default:a(()=>e[27]||(e[27]=[t("exercise 5")])),_:1}),e[34]||(e[34]=t("."))]),e[68]||(e[68]=o('TutorialConfig
class specify the default value as false
.HelloDic
class, by modifying its constructor and using the new field of the TutorialConfig
class.HelloDic
class to decide whether the log message from exercise 1 should be printed.test-setup/docker-compose.yml
by adding the new environment variable to the service dic-bpe and set the value to "true"
.http://highmed.org/fhir/CodeSystem/tutorial
having a concept with code tutorial-input
.http://highmed.org/fhir/ValueSet/tutorial
that includes all concepts from the CodeSystem.string
to the task-hello-dic.xml
Task profile using the concept of the new CodeSystem as a fixed coding.HelloDic
class from the "leading" Task and add the value to the log message from exercise 1.TutorialExampleStarter
by adding the new input parameter with an arbitrary string.Execute a maven build of the dsf-process-tutorial
parent module via:
mvn clean install -Pexercise-2
Verify that the build was successful and no test failures occurred.
To verify the highmedorg_helloDic
process can be executed successfully, we need to deploy it into a DSF instance and execute the process. The maven install
build is configured to create a process jar file with all necessary resources and copy the jar to the appropriate locations of the docker test setup.
Test_DIC
organization in a console at location .../dsf-process-tutorial/test-setup
:docker-compose up dic-fhir
Verify the DSF FHIR server started successfully.
Test_DIC
organization in second console at location .../dsf-process-tutorial/test-setup
:docker-compose up dic-bpe
Verify the DSF BPE server started successfully and deployed the highmedorg_helloDic process.
',17)),r("ol",u,[r("li",null,[e[36]||(e[36]=t("Start the ")),e[37]||(e[37]=r("code",null,"highmedorg_helloDic",-1)),e[38]||(e[38]=t(" process by posting an appropriate FHIR ")),e[39]||(e[39]=r("a",{href:"http://hl7.org/fhir/R4/task.html",target:"_blank",rel:"noopener noreferrer"},"Task",-1)),e[40]||(e[40]=t(" resource to the DSF FHIR server of the ")),e[41]||(e[41]=r("code",null,"Test_DIC",-1)),e[42]||(e[42]=t(" organization: Execute the ")),e[43]||(e[43]=r("code",null,"main",-1)),e[44]||(e[44]=t(" method of the ")),e[45]||(e[45]=r("code",null,"org.highmed.dsf.process.tutorial.TutorialExampleStarter",-1)),e[46]||(e[46]=t(" class as in ")),i(s,{to:"/oldstable/tutorial/exercise1-simpleProcess.html"},{default:a(()=>e[35]||(e[35]=[t("exercise 1")])),_:1}),e[47]||(e[47]=t(" to create the ")),e[48]||(e[48]=r("a",{href:"http://hl7.org/fhir/R4/task.html",target:"_blank",rel:"noopener noreferrer"},"Task",-1)),e[49]||(e[49]=t(" resource needed to start the ")),e[50]||(e[50]=r("code",null,"highmedorg_helloDic",-1)),e[51]||(e[51]=t(" process."))])]),e[69]||(e[69]=o('Verify that the highmedorg_helloDic
process was executed by the DSF BPE server. The BPE server should:
HelloDic
implementation.Check that you can disable logging of you message by modifying the docker-compose.yml
file and configuring your environment variable with the value "false"
or removing the environment variable.
Note: Changes to environment variable require recreating the docker container.
Also check that modification to the Task input parameter specified in the TutorialExampleStarter
class, have the appropriate effect on your log message.
Prerequisites | Exercise 1 | Exercise 1.1 | Exercise 2 | Exercise 3 | Exercise 4 | Exercise 5
"}');export{k as comp,b as data}; diff --git a/assets/exercise3-messageEvents.html-tI2HZN1H.js b/assets/exercise3-messageEvents.html-tI2HZN1H.js new file mode 100644 index 000000000..e80f70d26 --- /dev/null +++ b/assets/exercise3-messageEvents.html-tI2HZN1H.js @@ -0,0 +1,32 @@ +import{_ as o}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as l,b as n,e as t,f as a,d as i,a as r,r as h,o as d}from"./app-BIWb5uIp.js";const p="/photos/guideline/tutorial/ex3.png",c={};function g(k,e){const s=h("RouteLink");return d(),l("div",null,[n("p",null,[t(s,{to:"/oldstable/tutorial/prerequisites.html"},{default:a(()=>e[0]||(e[0]=[i("Prerequisites")])),_:1}),e[6]||(e[6]=i(" | ")),t(s,{to:"/oldstable/tutorial/exercise1-simpleProcess.html"},{default:a(()=>e[1]||(e[1]=[i("Exercise 1")])),_:1}),e[7]||(e[7]=i(" | ")),t(s,{to:"/oldstable/tutorial/exercise11-processDebugging.html"},{default:a(()=>e[2]||(e[2]=[i("Exercise 1.1")])),_:1}),e[8]||(e[8]=i(" | ")),t(s,{to:"/oldstable/tutorial/exercise2-inputParameters.html"},{default:a(()=>e[3]||(e[3]=[i("Exercise 2")])),_:1}),e[9]||(e[9]=i(" | Exercise 3 | ")),t(s,{to:"/oldstable/tutorial/exercise4-exclusiveGateways.html"},{default:a(()=>e[4]||(e[4]=[i("Exercise 4")])),_:1}),e[10]||(e[10]=i(" | ")),t(s,{to:"/oldstable/tutorial/exercise5-eventBasedGateways.html"},{default:a(()=>e[5]||(e[5]=[i("Exercise 5")])),_:1})]),e[33]||(e[33]=r('Communication between organizations is modeled using message flow in BPMN processes. The third exercise shows how a process at one organization can trigger a process at another organization.
To demonstrate communication between two organizations we will configure message flow between the processes highmedorg_helloDic
and highmedorg_helloCos
. The processes are then to be executed at the organizations Test_DIC
and Test_COS
respectively in the docker test setup, with the former triggering execution of the latter by automatically sending a Task from organization Test_DIC
to organization Test_COS
.
BPMN processes are instantiated and started within the DSF by creating a matching FHIR Task resource in the DSF FHIR server. This is true for executing a process on the local DSF BPE server by manually creating a Task resource, but also works by creating and starting a process instance at a remote DSF BPE server from an executing process automatically.
In order to exchange information between different processes, for example at two different organizations, BPMN message flow is used. Typically represented by a dashed line arrow between elements with black (send) and white (receive) envelop icons. The following BPMN collaboration diagram shows two processes. The process at "Organization 1" is sending a message to "Organization 2" which results in the instantiation and execution of new process instance at the second organization.
Every time message flow is used in a BPMN process for the DSF, a corresponding FHIR Task profile needs to be specified for every interaction. This profile specifies which process should be started or continued and what the message name is when correlating the appropriate Message Start Event or Intermediate Message Catch Event. A Business Key and a Correlation Key are specified if different process instances need to be linked to a single execution, for example to be able to send a message back.
FHIR ActivityDefinition resources are used to announce what processes can be instantiated at a given DSF instance. They also control what kind of organization can request the instantiation or continuation of a process instance and what kind of organization is allowed to fulfill the request.
In order to link the FHIR and BPMN worlds the BPMN process definition key needs to be specified following the pattern ^[-a-zA-Z0-9]+_[-a-zA-Z0-9]+$
for example:
domainorg_processKey
In addition the BPM process needs to specify a process version with the pattern ^\\d+.\\d+.\\d+$
for example:
1.0.0
This results in a canonical URL used to identify the process, for example:
http://domain.org/bpe/Process/processKey/1.0.0
The canonical URL is used for Task.instantiatesUri and ActivityDefinition.url / version.
FHIR ActivityDefinition resources are used to announce what processes can be instantiated at a given DSF instance and contain the authorization rules for the specified process. ActivityDefinition for the DSF need to comply with the http://highmed.org/fhir/StructureDefinition/activity-definition profile, with authorization rules configured using the http://highmed.org/fhir/StructureDefinition/extension-process-authorization extension.
The authorization extension needs to be configured at least once and has four sub extensions:
String value specifying the message name of Message Start Event, Intermediate Message Catch Event or Message Receive Task this authorization rule should match. Can only be specified once per authorization rule extension.
Canonical URL value specifying the Task profile this authorization rule should match. Can only be specified once per authorization rule extension.
Coding value matching entries from the http://highmed.org/fhir/ValueSet/process-authorization-requester ValueSet:
',28)),n("ul",null,[e[16]||(e[16]=r('LOCAL_ORGANIZATION A local organization with a specific identifier. The organization identifier needs to specified using the http://highmed.org/fhir/StructureDefinition/extension-process-authorization-organization extension.
REMOTE_ORGANIZATION A remote (non local) organization with a specific identifier. The organization identifier needs to specified using the http://highmed.org/fhir/StructureDefinition/extension-process-authorization-organization extension.
LOCAL_ROLE A local organizations with a specific role defined via OrganizationAffiliation. Role and consortium identifier need to be specified using the http://highmed.org/fhir/StructureDefinition/extension-process-authorization-consortium-role extension.
Coding value matching entries from the http://highmed.org/fhir/ValueSet/process-authorization-recipient ValueSet.
LOCAL_ORGANIZATION Organization with a specific identifier. The organization identifier needs to specified using the http://highmed.org/fhir/StructureDefinition/extension-process-authorization-organization extension.
LOCAL_ROLE Organizations with a specific role defined via OrganizationAffiliation. Role and consortium identifier need to be specified using the http://highmed.org/fhir/StructureDefinition/extension-process-authorization-consortium-role extension.
LOCAL_ALL All organizations regardless of their identifier or role in a consortium.
The local organization of a DSF instance is configured using the environment variables ORG_HIGHMED_DSF_FHIR_SERVER_ORGANIZATION_IDENTIFIER_VALUE for the DSF FHIR server and ORG_HIGHMED_DSF_BPE_FHIR_SERVER_ORGANIZATION_IDENTIFIER_VALUE for the DSF BPE server.
The following example specifies that process execution can only be requested by a organization with a specific identifier and only allows execution of the process in the DSF instance of an organization with a specific identifier.
<extension url="http://highmed.org/fhir/StructureDefinition/extension-process-authorization">
+ <extension url="message-name">
+ <valueString value="some-message-name" />
+ </extension>
+ <extension url="task-profile">
+ <valueCanonical value="http://foo.org/fhir/StructureDefinition/profile|#{version}" />
+ </extension>
+ <extension url="requester">
+ <valueCoding>
+ <extension url="http://highmed.org/fhir/StructureDefinition/extension-process-authorization-organization">
+ <valueIdentifier>
+ <system value="http://highmed.org/sid/organization-identifier" />
+ <value value="identifier.remote.org" />
+ </valueIdentifier>
+ </extension>
+ <system value="http://highmed.org/fhir/CodeSystem/process-authorization" />
+ <code value="REMOTE_ORGANIZATION" />
+ </valueCoding>
+ </extension>
+ <extension url="recipient">
+ <valueCoding>
+ <extension url="http://highmed.org/fhir/StructureDefinition/extension-process-authorization-organization">
+ <valueIdentifier>
+ <system value="http://highmed.org/sid/organization-identifier" />
+ <value value="identifier.local.org" />
+ </valueIdentifier>
+ </extension>
+ <system value="http://highmed.org/fhir/CodeSystem/process-authorization" />
+ <code value="LOCAL_ORGANIZATION" />
+ </valueCoding>
+ </extension>
+</extension>
highmedorg_helloDic
process in the hello-dic.bpmn
file and replace the End Event with a Message End Event. Configure input parameters instantiatesUri
, profile
and messageName
in the BPMN model for the Message End Event. Set the message name of the Message End Event and configure it to be executed using the HelloCosMessage class.helloCos
as the message name. Figure out what the appropriate instantiatesUri
value is, based on the name (process definition key) of the process to be triggered.highmedorg_helloCos
process in the hello-cos.bpmn
file and configure the message name of the Message Start Event with the same value as the message name of the Message End Event in the highmedorg_helloDic
process.helloCos
message.highmedorg_helloCos
process and configure the authorization extension to allow the Test_DIC
organization as the requester and the Test_COS
organization as the recipient.highmedorg_helloCos
process and its resources to the TutorialProcessPluginDefinition
class.HelloDic
service class to set the target
process variable for the Test_COS
organization.HelloCosMessage
class as a spring in the TutorialConfig
class.Execute a maven build of the dsf-process-tutorial
parent module via:
mvn clean install -Pexercise-3
Verify that the build was successful and no test failures occurred.
To verify the highmedorg_helloDic
and highmedorg_helloCos
processes can be executed successfully, we need to deploy them into DSF instances and execute the highmedorg_helloDic
process. The maven install
build is configured to create a process jar file with all necessary resources and copy the jar to the appropriate locations of the docker test setup.
Test_DIC
organization in a console at location .../dsf-process-tutorial/test-setup
:docker-compose up dic-fhir
Verify the DSF FHIR server started successfully.
Test_DIC
organization in another console at location .../dsf-process-tutorial/test-setup
:docker-compose up dic-bpe
Verify the DSF BPE server started successfully and deployed the highmedorg_helloDic process.
Test_COS
organization in a console at location .../dsf-process-tutorial/test-setup
:docker-compose up cos-fhir
Verify the DSF FHIR server started successfully. You can access the webservice of the DSF FHIR server at https://cos/fhir.
The DSF FHIR server uses a server certificate that was generated during the first maven build. To authenticate yourself to the server you can use the client certificate located at .../dsf-process-tutorial/test-data-generator/cert/Webbrowser_Test_User/Webbrowser_Test_User_certificate.p12
(Password: password).
Test_COS
organization in another console at location .../dsf-process-tutorial/test-setup
:docker-compose up cos-bpe
Verify the DSF BPE server started successfully and deployed the highmedorg_helloCos
process. The DSF BPE server should print a message that the process was deployed. The DSF FHIR server should now have a new ActivityDefinition resource. Go to https://cos/fhir/ActivityDefinition to check if the expected resource was created by the BPE while deploying the process. The returned FHIR Bundle should contain two ActivityDefinition resources. Also, go to https://cos/fhir/StructureDefinition?url=http://highmed.org/fhir/StructureDefinition/task-hello-cos to check if the expected Task profile was created.
highmedorg_helloDic
process by posting a specific FHIR Task resource to the DSF FHIR server of the Test_DIC
organization: Execute therefore the main
method of the org.highmed.dsf.process.tutorial.TutorialExampleStarter
class to create the Task resource needed to start the highmedorg_helloDic
process.Verify that the FHIR Task resource was created at the DSF FHIR server and the highmedorg_helloDic
process was executed by the DSF BPE server of the Test_DIC
organization. The DSF BPE server of the Test_DIC
organization should print a message showing that a Task resource to start the highmedorg_helloCos
process was send to the Test_COS
organization.
Verify that a FHIR Task resource was created at the DSF FHIR server of the Test_COS
organization and the highmedorg_helloCos
process was then executed by the DSF BPE server of the Test_COS
organization.
Prerequisites | Exercise 1 | Exercise 1.1 | Exercise 2 | Exercise 3 | Exercise 4 | Exercise 5
"}');export{m as comp,v as data}; diff --git a/assets/exercise4-exclusiveGateways.html-BFc2qSSk.js b/assets/exercise4-exclusiveGateways.html-BFc2qSSk.js new file mode 100644 index 000000000..8699efb43 --- /dev/null +++ b/assets/exercise4-exclusiveGateways.html-BFc2qSSk.js @@ -0,0 +1,4 @@ +import{_ as r}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as l,b as o,e as i,f as a,d as s,a as n,r as d,o as c}from"./app-BIWb5uIp.js";const h={};function u(p,e){const t=d("RouteLink");return c(),l("div",null,[o("p",null,[i(t,{to:"/oldstable/tutorial/prerequisites.html"},{default:a(()=>e[0]||(e[0]=[s("Prerequisites")])),_:1}),e[6]||(e[6]=s(" | ")),i(t,{to:"/oldstable/tutorial/exercise1-simpleProcess.html"},{default:a(()=>e[1]||(e[1]=[s("Exercise 1")])),_:1}),e[7]||(e[7]=s(" | ")),i(t,{to:"/oldstable/tutorial/exercise11-processDebugging.html"},{default:a(()=>e[2]||(e[2]=[s("Exercise 1.1")])),_:1}),e[8]||(e[8]=s(" | ")),i(t,{to:"/oldstable/tutorial/exercise2-inputParameters.html"},{default:a(()=>e[3]||(e[3]=[s("Exercise 2")])),_:1}),e[9]||(e[9]=s(" | ")),i(t,{to:"/oldstable/tutorial/exercise3-messageEvents.html"},{default:a(()=>e[4]||(e[4]=[s("Exercise 3")])),_:1}),e[10]||(e[10]=s(" | Exercise 4 | ")),i(t,{to:"/oldstable/tutorial/exercise5-eventBasedGateways.html"},{default:a(()=>e[5]||(e[5]=[s("Exercise 5")])),_:1})]),e[24]||(e[24]=n(`Different execution paths in a process based on the state of process variables can be achieved using Exclusive Gateways. In Exercise 4 we will examine how this can be implemented by modifying the highmedorg_helloDic
process.
Different sequence flows during the execution of a process instance can be modeled using BPMN Exclusive Gateways. For each outgoing sequence flow of the gateway, a BPMN Condition Expression can be added to the process model, deciding whether a sequence flow should be followed. Thereby, all condition decisions must be in an XOR relationship to each other.
A BPMN Condition Expression uses the \${..}
notation. Within the curly braces all execution variables of a process instance can be accessed, e.g. the ones that were stored in a previous Java implementation of a BPMN ServiceTask. For example, the BPMN Condition Expression \${cohortSize > 100}
checks whether the value in the execution variable cohortSize is greater than 100.
Via the DelegateExecution execution
parameter of the doExecute
method of a class extending AbstractServiceDelegate
, we can write and read process variables of the current process instance. The following code listing show how to write and read a boolean
variable:
{
+ execution.setVariable("variable-name", Variables.booleanValue(false));
+ boolean variable = (boolean) execution.getVariable("variable-name");
+}
For more details on process variables see the Camunda documentation.
HelloDic
class, write an algorithm deciding based on the "leading" Task's input parameter tutorial-input
, whether the highmedorg_helloCos
process should be started.highmedorg_helloDic
process model and two outgoing sequence flows - the first starting process highmedorg_helloDic
, the second stopping process highmedorg_helloDic
without starting process highmedorg_helloCos
.Execute a maven build of the dsf-process-tutorial parent module via:
mvn clean install -Pexercise-4
Verify that the build was successful and no test failures occurred.
To verify the highmedorg_helloDic
and highmedorg_helloCos
processes can be executed successfully, we need to deploy them into DSF instances and execute the highmedorg_helloDic
process. The maven install
build is configured to create a process jar file with all necessary resources and copy the jar to the appropriate locations of the docker test setup.
Test_DIC
organization in a console at location .../dsf-process-tutorial/test-setup
:docker-compose up dic-fhir
Verify the DSF FHIR server started successfully.
Test_DIC
organization in a second console at location .../dsf-process-tutorial/test-setup
:docker-compose up cos-fhir
Verify the DSF FHIR server started successfully.
Test_COS
organization in a fourth console at location .../dsf-process-tutorial/test-setup
:docker-compose up cos-bpe
Verify the DSF BPE server started successfully and deployed the highmedorg_helloCos
process.
highmedorg_helloDic
process by posting a specific FHIR Task resource to the DSF FHIR server of the Test_DIC
organization: Execute therefore the main
method of the org.highmed.dsf.process.tutorial.TutorialExampleStarter
class to create the Task resource needed to start the highmedorg_helloDic
process.Verify that the highmedorg_helloDic
process was executed successfully by the Test_DIC
DSF BPE server and possibly the highmedorg_helloCos
process by the Test_COS
DSF BPE server, depending on whether decision of your algorithm based on the input parameter allowed to start the highmedorg_helloDic
process.
Prerequisites | Exercise 1 | Exercise 1.1 | Exercise 2 | Exercise 3 | Exercise 4 | Exercise 5
"}');export{b as comp,f as data}; diff --git a/assets/exercise5-eventBasedGateways.html-DEvFgy-0.js b/assets/exercise5-eventBasedGateways.html-DEvFgy-0.js new file mode 100644 index 000000000..2ccc0d1a2 --- /dev/null +++ b/assets/exercise5-eventBasedGateways.html-DEvFgy-0.js @@ -0,0 +1,48 @@ +import{_ as r}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as l,b as n,e as t,f as a,d as i,a as h,r as o,o as d}from"./app-BIWb5uIp.js";const p="/photos/guideline/tutorial/ex5.png",k={};function g(c,e){const s=o("RouteLink");return d(),l("div",null,[n("p",null,[t(s,{to:"/oldstable/tutorial/prerequisites.html"},{default:a(()=>e[0]||(e[0]=[i("Prerequisites")])),_:1}),e[6]||(e[6]=i(" | ")),t(s,{to:"/oldstable/tutorial/exercise1-simpleProcess.html"},{default:a(()=>e[1]||(e[1]=[i("Exercise 1")])),_:1}),e[7]||(e[7]=i(" | ")),t(s,{to:"/oldstable/tutorial/exercise11-processDebugging.html"},{default:a(()=>e[2]||(e[2]=[i("Exercise 1.1")])),_:1}),e[8]||(e[8]=i(" | ")),t(s,{to:"/oldstable/tutorial/exercise2-inputParameters.html"},{default:a(()=>e[3]||(e[3]=[i("Exercise 2")])),_:1}),e[9]||(e[9]=i(" | ")),t(s,{to:"/oldstable/tutorial/exercise3-messageEvents.html"},{default:a(()=>e[4]||(e[4]=[i("Exercise 3")])),_:1}),e[10]||(e[10]=i(" | ")),t(s,{to:"/oldstable/tutorial/exercise4-exclusiveGateways.html"},{default:a(()=>e[5]||(e[5]=[i("Exercise 4")])),_:1}),e[11]||(e[11]=i(" | Exercise 5"))]),e[24]||(e[24]=h('In the final exercise we will look at message flow between three organizations as well as how to continue a waiting process if no return message arrives. With this exercise we will add a third process and complete a message loop from Test_DIC
to Test_COR
to Test_HRP
back to Test_DIC
.
If an existing and started process instance is waiting for a message from another organization, the corresponding FHIR Task may never arrive. Either because the other organization decides to never send the "message" or because some technical problem prohibits the Task resource from being posted to the DSF FHIR server. This would result in stale process instances that never finish.
In order to solve this problem we can add an Event Based Gateway to the process waiting for a response and then either handle a Task resource with the response and finish the process in a success state or fire of an Intermediate Timer Catch Event after a defined wait period and finish the process in an error state. The following BPMN collaboration diagram shows how the process at the first organization would look like if two different message or no message could be received:
For Timer Events the duration until the timer fires is specified using the ISO 8601 Durations format. Examples can be found in the Camunda 7 documentation.
In the example above the first organization is sending a "message" to the second and waiting for a reply. In order to correlate the return message with the waiting process instance, a unique identifier needs to be exchanged between both process instances. Within the DSF this is implemented using the process instance business-key and a corresponding Task.input parameter. For 1:1 communication relationships this is handled by the DSF BPE servers automatically, but the corresponding Task profiles need to define the business-key input parameter as mandatory.
If multiple message are send in a 1:n relationship with a n:1 return an additional correlation-key needs to be configured in order to correlate every bidirectional communication between two DSF instances.
FHIR ActivityDefinition resources are used to announce what processes can be instantiated at a given DSF instance and contain the authorization rules for the specified process. ActivityDefinition for the DSF need to comply with the http://highmed.org/fhir/StructureDefinition/activity-definition profile, with authorization rules configured using the http://highmed.org/fhir/StructureDefinition/extension-process-authorization extension.
The authorization extension needs to be configured at least once and has four sub extensions:
String value specifying the message name of Message Start Event, Intermediate Message Catch Event or Message Receive Task this authorization rule should match. Can only be specified once per authorization rule extension.
Canonical URL value specifying the Task profile this authorization rule should match. Can only be specified once per authorization rule extension.
Coding value matching entries from the http://highmed.org/fhir/ValueSet/process-authorization-requester ValueSet:
LOCAL_ORGANIZATION A local organization with a specific identifier. The organization identifier needs to specified using the http://highmed.org/fhir/StructureDefinition/extension-process-authorization-organization extension.
REMOTE_ORGANIZATION A remote (non local) organization with a specific identifier. The organization identifier needs to specified using the http://highmed.org/fhir/StructureDefinition/extension-process-authorization-organization extension.
LOCAL_ROLE A local organizations with a specific role defined via OrganizationAffiliation. Role and consortium identifier need to be specified using the http://highmed.org/fhir/StructureDefinition/extension-process-authorization-consortium-role extension.
REMOTE_ROLE A remote (non local) organizations with a specific role defined via OrganizationAffiliation. Role and consortium identifier need to be specified using the http://highmed.org/fhir/StructureDefinition/extension-process-authorization-consortium-role extension.
LOCAL_ALL All local organizations regardless of their identifier or role in a consortium.
REMOTE_ALL All remote (non local) organizations regardless of their identifier or role in a consortium.
Coding value matching entries from the http://highmed.org/fhir/ValueSet/process-authorization-recipient ValueSet.
LOCAL_ORGANIZATION Organization with a specific identifier. The organization identifier needs to specified using the http://highmed.org/fhir/StructureDefinition/extension-process-authorization-organization extension.
LOCAL_ROLE Organizations with a specific role defined via OrganizationAffiliation. Role and consortium identifier need to be specified using the http://highmed.org/fhir/StructureDefinition/extension-process-authorization-consortium-role extension.
LOCAL_ALL All organizations regardless of their identifier or role in a consortium.
The local organization of a DSF instance is configured using the environment variables ORG_HIGHMED_DSF_FHIR_SERVER_ORGANIZATION_IDENTIFIER_VALUE for the DSF FHIR server and ORG_HIGHMED_DSF_BPE_FHIR_SERVER_ORGANIZATION_IDENTIFIER_VALUE for the DSF BPE server.
The following example specifies that process execution can only be requested by a organization with a specific identifier and only allows execution of the process in the DSF instance of an organization with a specific identifier.
<extension url="http://highmed.org/fhir/StructureDefinition/extension-process-authorization">
+ <extension url="message-name">
+ <valueString value="some-message-name" />
+ </extension>
+ <extension url="task-profile">
+ <valueCanonical value="http://foo.org/fhir/StructureDefinition/profile|#{version}" />
+ </extension>
+ <extension url="requester">
+ <valueCoding>
+ <extension url="http://highmed.org/fhir/StructureDefinition/extension-process-authorization-consortium-role">
+ <extension url="consortium">
+ <valueIdentifier>
+ <system value="http://highmed.org/sid/organization-identifier" />
+ <value value="identifier.consortium.org" />
+ </valueIdentifier>
+ </extension>
+ <extension url="role">
+ <valueCoding>
+ <system value="http://highmed.org/fhir/CodeSystem/organization-role" />
+ <code value="SOME_ROLE" />
+ </valueCoding>
+ </extension>
+ </extension>
+ <system value="http://highmed.org/fhir/CodeSystem/process-authorization" />
+ <code value="REMOTE_ROLE" />
+ </valueCoding>
+ </extension>
+ <extension url="recipient">
+ <valueCoding>
+ <extension url="http://highmed.org/fhir/StructureDefinition/extension-process-authorization-consortium-role">
+ <extension url="consortium">
+ <valueIdentifier>
+ <system value="http://highmed.org/sid/organization-identifier" />
+ <value value="identifier.consortium.org" />
+ </valueIdentifier>
+ </extension>
+ <extension url="role">
+ <valueCoding>
+ <system value="http://highmed.org/fhir/CodeSystem/organization-role" />
+ <code value="SOME_ROLE" />
+ </valueCoding>
+ </extension>
+ </extension>
+ <system value="http://highmed.org/fhir/CodeSystem/process-authorization" />
+ <code value="LOCAL_ROLE" />
+ </valueCoding>
+ </extension>
+</extension>
HelloCosMessage
and use the value from the Task.input parameter of the helloDic
Task to send it to the highmedorg_helloCos
process via a Task.input parameter in the helloCos
Task. Override the getAdditionalInputParameters
to configure a Task.input parameter to be send.highmedorg_helloCos
process to use a Message End Event to trigger the process in file hello-hrp.bpmn
. Figure out the values for the instantiatesUri
, profile
and messageName
input parameters of the Message End Event based on the AcitvityDefinition in file hello-hrp.xml
.highmedorg_helloDic
process: goodbyDic
message from the highmedorg_helloHrp
process.highmedorg_helloHrp
process after two minutes. Make sure both cases finish with a process End Event.hello-hrp.bpmn
and set the process definition key and version. Figure out the appropriate values based on the AcitvityDefinition in file hello-hrp.xml
.hello-hrp.bpmn
to the TutorialProcessPluginDefinition
and configure the FHIR resources needed for the three processes.HelloCos
, HelloHrpMessage
, HelloHrp
and GoodbyeDicMessage
classes as spring beans.Execute a maven build of the dsf-process-tutorial
parent module via:
mvn clean install -Pexercise-5
Verify that the build was successful and no test failures occurred.
To verify the highmedorg_helloDic
, highmedorg_helloCos
and highmedorg_helloHrp
processes can be executed successfully, we need to deploy them into DSF instances and execute the highmedorg_helloDic
process. The maven install
build is configured to create a process jar file with all necessary resources and copy the jar to the appropriate locations of the docker test setup.
Test_DIC
organization in a console at location .../dsf-process-tutorial/test-setup
:docker-compose up dic-fhir
Verify the DSF FHIR server started successfully.
Test_DIC
organization in a second console at location .../dsf-process-tutorial/test-setup
:docker-compose up dic-bpe
Verify the DSF BPE server started successfully and deployed the highmedorg_helloDic
process.
Test_COS
organization in a third console at location .../dsf-process-tutorial/test-setup
:docker-compose up cos-fhir
Verify the DSF FHIR server started successfully.
Test_COS
organization in a fourth console at location .../dsf-process-tutorial/test-setup
:docker-compose up cos-bpe
Verify the DSF BPE server started successfully and deployed the highmedorg_helloDic
process.
Test_HRP
organization in a fifth at location .../dsf-process-tutorial/test-setup
:docker-compose up hrp-fhir
Verify the DSF FHIR server started successfully. You can access the webservice of the DSF FHIR server at https://hrp/fhir.
The DSF FHIR server uses a server certificate that was generated during the first maven build. To authenticate yourself to the server you can use the client certificate located at .../dsf-process-tutorial/test-data-generator/cert/Webbrowser_Test_User/Webbrowser_Test_User_certificate.p12
(Password: password).
Test_HRP
organization in a sixth console at location .../dsf-process-tutorial/test-setup
:docker-compose up hrp-bpe
Verify the DSF BPE server started successfully and deployed the highmedorg_helloHrp
process. The DSF BPE server should print a message that the process was deployed. The DSF FHIR server should now have a new ActivityDefinition resource. Go to https://hrp/fhir/ActivityDefinition to check if the expected resource was created by the BPE while deploying the process. The returned FHIR Bundle should contain a three ActivityDefinition resources. Also, go to https://hrp/fhir/StructureDefinition?url=http://highmed.org/fhir/StructureDefinition/task-hello-hrp to check if the expected Task profile was created.
highmedorg_helloDic
process by posting a specific FHIR Task resource to the DSF FHIR server of the Test_DIC
organization: Execute therefore the main
method of the org.highmed.dsf.process.tutorial.TutorialExampleStarter
class to create the Task resource needed to start the highmedorg_helloDic
process.Verify that the FHIR Task resource was created at the DSF FHIR server and the highmedorg_helloDic
process was executed by the DSF BPE server of the Test_DIC
organization. The DSF BPE server of the Test_DIC
organization should print a message showing that a Task resource to start the highmedorg_helloCos
process was sent to the Test_COS
organization.
Verify that a FHIR Task resource was created at the DSF FHIR server of the Test_COS
organization and the highmedorg_helloCos
process was executed by the DSF BPE server of the Test_COS
organization. The DSF BPE server of the Test_COS
organization should print a message showing that a Task resource to start the highmedorg_helloHrp
process was send to the Test_HRP
organization.
Based on the value of the Task.input parameter you send, the highmedorg_helloHrp
process will either send a goodbyDic
message to the Test_DIC
organization or finish without sending a message.
To trigger the goodbyDic
message, use send-response
as the http://highmed.org/fhir/CodeSystem/tutorial#tutorial-input
input parameter.
Verify that the highmedorg_helloDic
process either finishes with the arrival of the goodbyDic
message or after waiting for two minutes.
Prerequisites | Exercise 1 | Exercise 1.1 | Exercise 2 | Exercise 3 | Exercise 4 | Exercise 5
"}');export{m as comp,B as data}; diff --git a/assets/feasibility.html-UPmBbOyd.js b/assets/feasibility.html-UPmBbOyd.js new file mode 100644 index 000000000..6c4191df2 --- /dev/null +++ b/assets/feasibility.html-UPmBbOyd.js @@ -0,0 +1 @@ +import{_ as i}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as t,a as r,o as a}from"./app-BIWb5uIp.js";const s={};function n(o,e){return a(),t("div",null,e[0]||(e[0]=[r('Funded by the German Federal Ministry of Research and Education, 25 sites have installed the DSF to execute the Feasibility process. To perform feasibility queries, a researcher can register and query data on the FDPG (Forschungsdaten Portal für Gesundheit - Research Data Portal) website. Basic data of hospitalizations of over 8 million patients with over 40 million diagnoses and much more such as laboratory values or drug prescriptions are available. After a successful query, the data is made available in standardized FHIR format. Further information can be found in the flyer.
Medical routine data holds great promise for advancing research, yet its integration into a research context poses significant challenges. To address this, Medical Data Integration Centers have been established, by the medical informatics initiative to consolidate data from primary information systems into a central repository. However, relying on data from only one organization is rarely sufficient to answer complex research questions, so merging data across institutional boundaries is necessary.
To enable researchers to leverage this integrated data for specific research projects, there is a critical need for the ability to query cohort sizes across institutions. The feasibility process allows researchers to conduct automated and distributed feasibility queries, i.e., cohort size estimates. This process is executed according to the open standard BPMN 2.0, the underlying process data model is based on HL7 FHIR R4 resources.
Funded by the German Federal Ministry of Research and Education, 25 sites have installed the DSF to execute the Feasibility process. To perform feasibility queries, a researcher can register and query data on the FDPG (Forschungsdaten Portal für Gesundheit - Research Data Portal) website. Basic data of hospitalizations of over 8 million patients with over 40 million diagnoses and much more such as laboratory values or drug prescriptions are available. After a successful query, the data is made available in standardized FHIR format. Further information can be found in the flyer.
"}');export{c as comp,f as data}; diff --git a/assets/fhir.html-BWbIvPkF.js b/assets/fhir.html-BWbIvPkF.js new file mode 100644 index 000000000..d0f10122d --- /dev/null +++ b/assets/fhir.html-BWbIvPkF.js @@ -0,0 +1 @@ +import{_ as r}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as s,a as i,o as t}from"./app-BIWb5uIp.js";const o={};function n(l,e){return t(),s("div",null,e[0]||(e[0]=[i('Please also check common parameters for additional configuration options.
/run/secrets/app_client_certificate.pem
/run/secrets/app_client_certificate_private_key.pem
/run/secrets/app_client_certificate_private_key.pem.password
2000
10000
/run/secrets/app_server_trust_certificates.pem
true
false
true
false
2
/run/secrets/db_liquibase.password
liquibase_user
jdbc:postgresql://db/fhir
fhir_users
/run/secrets/db_user.password
fhir_permanent_delete_users
/run/secrets/db_user_permanent_delete.password
fhir_server_permanent_delete_user
fhir_server_user
https://foo.bar/fhir
conf/bundle.xml
hospital.com
20
false
false
for developmenttrue
foo.bar, test.com:8080
http://proxy.foo:8080
Please also check common parameters for additional configuration options.
\\n/run/secrets/app_client_certificate.pem
This setup guide uses pre-build docker images for DSF Version 0.9.3. This guide is only suitable for HiGHmed organizations.
If you are not a member of HiGHmed, see NUM-CODEX Install.
Both VMs need latest docker and docker-compose. For the latest install guide see https://docs.docker.com/engine/install and https://docs.docker.com/compose/install
docker:
sudo apt-get update
+sudo apt-get install apt-transport-https ca-certificates curl gnupg lsb-release
+curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
+echo "deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
+sudo apt-get update
+sudo apt-get install docker-ce docker-ce-cli containerd.io
docker-compose (warning: 2.17.3 might not be latest):
sudo curl -L "https://github.com/docker/compose/releases/download/v2.17.3/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
+sudo chmod +x /usr/local/bin/docker-compose
Two Certificates from the DFN-PKI Global G2 (via DFN e.V.), GÉANT TCS (via DFN e.V.) or D-Trust (via TMF e.V.) are needed, more infos see Authentication
For additional information on the network setup see Network-and-Architecture.
Here is a quick overview of the expected network setup. Connections to the fTTP, the terminology server and simplifier.net for validating GECCO FHIR resources as well as the local GECCO FHIR server are not listed:
Source | Target | Port | Protocol |
---|---|---|---|
DSF BPE (local) | DSF FHIR (local) | 443 | https, wss |
DSF BPE (local) | DSF FHIR (GECCO Transfer Hub) | 443 | https |
DSF FHIR (local) | DSF FHIR (GECCO Transfer Hub) | 443 | https (HTTP HEAD only) |
DSF BPE (GECCO Transfer Hub) | DSF FHIR (local) | 443 | https |
DSF FHIR (GECCO Transfer Hub) | DSF FHIR (local) | 443 | https (HTTP HEAD only) |
You are required to fill out the on-boarding Excel spreadsheet, provided with the NUM-CODEX hackathon invite, and send it to the GECCO Transfer Hub. If the GECCO Transfer Hub already received and validated your On-Boarding Excel spreadsheet and you do not have to change any information, you can skip this step.
Server Certificate (certificate A)
This certificate will be used as the DSF FHIR servers server certificate (ssl_certificate_file.pem, ssl_certificate_key_file.pem)
ssl_certificate_file.pem
ssl_certificate_key_file.pem
Client Certificate (certificate B)
This certificate will be used as the DSF BPE servers client certificate (client_certificate.pem, client_certificate_private_key.pem) as well as the DSF FHIR servers client certificate (client_certificate.pem, client_certificate_private_key.pem)
client_certificate.pem
client_certificate_private_key.pem
Add Group/User
Add group and user used by the DSF FHIR java application. Ubuntu compatible commands below:
sudo addgroup --gid 2101 fhir
+sudo adduser --system --no-create-home --uid 2101 --gid 2101 fhir
Download and Extract Config Files
Download prepared DSF FHIR server config files and folder structure from
cd /opt
+wget https://github.com/highmed/highmed-dsf/wiki/resources/dsf_highmed_test_fhir_0_9_3.tar.gz
+sudo tar --same-owner -zxvf dsf_highmed_test_fhir_0_9_3.tar.gz
cd /opt
+wget https://github.com/highmed/highmed-dsf/wiki/resources/dsf_highmed_prod_fhir_0_9_3.tar.gz
+sudo tar --same-owner -zxvf dsf_highmed_prod_fhir_0_9_3.tar.gz
The tar
command will unpack the config files at /opt/fhir
assuming you changed into the /opt
directory.
Verify that the fhir
system user or group can write into the following folder
/opt/fhir/log
Add certificates and keys
docker-compose.yml
fileL39: - app_client_certificate_private_key.pem.password
+...
+L56: ORG_HIGHMED_DSF_FHIR_CLIENT_CERTIFICATE_PRIVATE_KEY_PASSWORD_FILE: /run/secrets/app_client_certificate_private_key.pem.password
+...
+L136: app_client_certificate_private_key.pem.password:
+L137: file: ./secrets/client_certificate_private_key.pem.password
Uncomment one of the certificate chain entries in the docker-compose file base on the certificate authority that signed your DSF FHIR server certificate (certificate A). For example use the following two lines if the server certificate is signed by DFN-Verein Global Issuing CA
L101: ssl_certificate_chain_file.pem:
+L102: file: ./secrets/ssl_certificate_chain_file_DFN-Verein.pem
Modify database passwords
Modify the docker-compose.yml file and set environment variables to the appropriate values
foo.bar.de
-> foo.bar.de:443
foo.bar.de
-> https://foo.bar.de/fhir
certtool --fingerprint --hash=sha512 --infile=client_certificate.pem
to generate the hash.ab12...37ff,f3a2...bb22
. You can add additional client certificate thumbprints for example the thumbprint of your (the admins) personal DFN PKI S/MIME certificate, to access the DSF FHIR servers REST interface.ab12...37ff,f3a2...bb22
. Usually it is not necessary to add additional thumbprints other than your client certificate (certificate B) here. When a client uses a certificate with a thumbprint listed here, the client is allowed to permanently delete FHIR resources.Start the DSF FHIR Server
Start using: docker-compose up -d && docker-compose logs -f
(Ctrl-C will close log, but not stop container)
Add Group/User
Add group and user used by the DSF BPE java application. Ubuntu compatible commands below:
sudo addgroup --gid 2202 bpe
+sudo adduser --system --no-create-home --uid 2202 --gid 2202 bpe
Download and Extract Config Files
Download prepared DSF BPE server config files and folder structure from
cd /opt
+wget https://github.com/highmed/highmed-dsf/wiki/resources/dsf_highmed_test_bpe_0_9_3.tar.gz
+sudo tar --same-owner -zxvf dsf_highmed_test_bpe_0_9_3.tar.gz
cd /opt
+wget https://github.com/highmed/highmed-dsf/wiki/resources/dsf_highmed_prod_bpe_0_9_3.tar.gz
+sudo tar --same-owner -zxvf dsf_highmed_prod_bpe_0_9_3.tar.gz
The tar
command will unpack the config files at /opt/bpe
assuming you changed into the /opt
directory.
Verify that the bpe
system user or group can write into the following folders
/opt/bpe/log
/opt/bpe/psn
Add certificates and keys
docker-compose.yml
fileL13: - app_client_certificate_private_key.pem.password
+...
+L41: ORG_HIGHMED_DSF_BPE_FHIR_CLIENT_CERTIFICATE_PRIVATE_KEY_PASSWORD_FILE: /run/secrets/app_client_certificate_private_key.pem.password
+...
+L99: app_client_certificate_private_key.pem.password:
+L100: file: ./secrets/client_certificate_private_key.pem.password
Modify database passwords
Modify the docker-compose.yml file and set environment variables to the appropriate values
foo.bar.de
-> https://foo.bar.de/fhir
Start the DSF BPE Server (without process plugins)
Start using: docker-compose up -d && docker-compose logs -f
(Ctrl-C will close log, but not stop container)
Verify DSF BPE Startup
If you need to debug the TLS connection to your DSF FHIR server use for example:docker run -it --rm alpine/openssl s_client your-fhir-server.fqdn:443
The command above should print the server certificate of your DSF FHIR server (certificate A) and end with a message like [...]tlsv13 alert certificate required[...]
Stop the DSF BPE Server
docker-compose stop
Add the following DSF BPE process plugins, for instructions on how to configure the plugin, see release notes.
Notice: Jar-files within the folders /opt/bpe/process
and /opt/bpe/plugin
need to be readable by the linxux bpe
user -> chown root:bpe
, chmod 440
Start the DSF BPE Server (with process plugins)
Start using: docker-compose up -d && docker-compose logs -f
(Ctrl-C will close log, but not stop container)
Request Allow-List upload from HiGHmed TTP
The Allow-List upload is needed in order to execute HiGHmed and NUM-CODEX processes.
This setup guide uses pre-build docker images for DSF Version 0.9.3. This guide is only suitable for HiGHmed organizations.
\\nIf you are not a member of HiGHmed, see NUM-CODEX Install.
Important note
This is a major DSF release not compatible with 0.9.x and older version developed at https://github.com/highmed/highmed-dsf.
Data Sharing Framework 1.x is the new major release of the Data Sharing Framework. Click here to find more information about the DSF in general.
\\nImportant note
\\nThis is a major DSF release not compatible with 0.9.x and older version developed at https://github.com/highmed/highmed-dsf.
\\nImportant note
This is a major DSF release not compatible with 0.9.x and older version developed at https://github.com/highmed/highmed-dsf.
Data Sharing Framework 1.x is the new major release of the Data Sharing Framework. Click here to find more information about the DSF in general.
\\nImportant note
\\nThis is a major DSF release not compatible with 0.9.x and older version developed at https://github.com/highmed/highmed-dsf.
\\nImportant note
This is a major DSF release not compatible with 0.9.x and older version developed at https://github.com/highmed/highmed-dsf.
Data Sharing Framework 1.x is the new major release of the Data Sharing Framework. Click here to find more information about the DSF in general.
\\nImportant note
\\nThis is a major DSF release not compatible with 0.9.x and older version developed at https://github.com/highmed/highmed-dsf.
\\nWe are thrilled that you consider contributing to our projects. Your contributions, big or small, are greatly valued and play a significant role in the success and improvement of our work. Whether you're a seasoned developer or just starting out, there's a place for you here to make a meaningful impact.
Before you start contributing, we recommend reading our getting started guidelines for detailed information on our processes and standards. This ensures a smooth and productive experience for everyone involved.
Your contributions in any form, are what drives the continuous growth and improvement of this project. Thank you for being a part of our community and for your willingness to contribute!
',5)]))}const g=t(i,[["render",a],["__file","index.html.vue"]]),c=JSON.parse(`{"path":"/v1.6.0/contribute/","title":"Contribute","lang":"en-US","frontmatter":{"title":"Contribute","icon":"info","gitInclude":[]},"headers":[{"level":3,"title":"Ways you can contribute:","slug":"ways-you-can-contribute","link":"#ways-you-can-contribute","children":[]}],"readingTime":{"minutes":1.64,"words":491},"filePathRelative":"v1.6.0/contribute/readme.md","excerpt":"We are thrilled that you consider contributing to our projects. Your contributions, big or small, are greatly valued and play a significant role in the success and improvement of our work. Whether you're a seasoned developer or just starting out, there's a place for you here to make a meaningful impact.
"}`);export{g as comp,c as data}; diff --git a/assets/index.html-Bur8W0e8.js b/assets/index.html-Bur8W0e8.js new file mode 100644 index 000000000..0d8f455a7 --- /dev/null +++ b/assets/index.html-Bur8W0e8.js @@ -0,0 +1 @@ +import{_ as i}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as r,a as n,o as a}from"./app-BIWb5uIp.js";const t={};function o(l,e){return a(),r("div",null,e[0]||(e[0]=[n('Important note
This is a major DSF release not compatible with 0.9.x and older version developed at https://github.com/highmed/highmed-dsf.
Data Sharing Framework 1.x is the new major release of the Data Sharing Framework. Click here to find more information about the DSF in general.
\\nImportant note
\\nThis is a major DSF release not compatible with 0.9.x and older version developed at https://github.com/highmed/highmed-dsf.
\\nWe are thrilled that you consider contributing to our projects. Your contributions, big or small, are greatly valued and play a significant role in the success and improvement of our work. Whether you're a seasoned developer or just starting out, there's a place for you here to make a meaningful impact.
Before you start contributing, we recommend reading our getting started guidelines for detailed information on our processes and standards. This ensures a smooth and productive experience for everyone involved.
Your contributions in any form, are what drives the continuous growth and improvement of this project. Thank you for being a part of our community and for your willingness to contribute!
',5)]))}const g=t(i,[["render",a],["__file","index.html.vue"]]),c=JSON.parse(`{"path":"/v1.5.2/contribute/","title":"Contribute","lang":"en-US","frontmatter":{"title":"Contribute","icon":"info","gitInclude":[]},"headers":[{"level":3,"title":"Ways you can contribute:","slug":"ways-you-can-contribute","link":"#ways-you-can-contribute","children":[]}],"readingTime":{"minutes":1.64,"words":491},"filePathRelative":"v1.5.2/contribute/readme.md","excerpt":"We are thrilled that you consider contributing to our projects. Your contributions, big or small, are greatly valued and play a significant role in the success and improvement of our work. Whether you're a seasoned developer or just starting out, there's a place for you here to make a meaningful impact.
"}`);export{g as comp,c as data}; diff --git a/assets/index.html-CIYI18oA.js b/assets/index.html-CIYI18oA.js new file mode 100644 index 000000000..8efa3fd37 --- /dev/null +++ b/assets/index.html-CIYI18oA.js @@ -0,0 +1 @@ +import{_ as i}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as r,a as n,o as a}from"./app-BIWb5uIp.js";const t={};function o(l,e){return a(),r("div",null,e[0]||(e[0]=[n('The Data Sharing Framework (DSF) is a concept for a secure middleware to distribute data sharing processes based on the BPMN 2.0 and FHIR R4 standards. The DSF is used to support biomedical research with routine data, aiming to extract, merge, pseudonymize and provide data stored in multiple distributed organizations. Every participating site runs a FHIR endpoint accessible by other sites and a business process engine in the local secured network. The process engines execute BPMN processes in order to coordinate local and remote steps necessary to enable cross-site data sharing or feasibility analyses. This includes access to local data repositories, use-and-access-committee decision support, consent filtering, and privacy preserving record-linkage and pseudonymization. The aim is to enable secure and syntactically-, semantically- and process-interoperable data exchange across organizational boundaries. The secure communication infrastructure is funded by the German Federal Ministry of Education and Research within the Medical Informatics structure as DSF Community.
We are very pleased to have won the Rolf Hansen Memorial Award at EFMI MIE 2023 in Gothenburg. The Rolf Hansen Memorial Award is presented annually by the European Federation for Medical Informatics (EFMI) for an outstanding paper as well as for an excellent presentation. It is named after Rolf Hansen (1931-1993), a well-known Norwegian medical informatician and former president of EFMI. Hauke Hund presented his Paper: No Transfer Without Validation: A Data Sharing Framework Use Case in May at the EFMI MIE. doi:10.3233/SHTI230066
The Data Sharing Framework (DSF) is a concept for a secure middleware to distribute data sharing processes based on the BPMN 2.0 and FHIR R4 standards. The DSF is used to support biomedical research with routine data, aiming to extract, merge, pseudonymize and provide data stored in multiple distributed organizations. Every participating site runs a FHIR endpoint accessible by other sites and a business process engine in the local secured network. The process engines execute BPMN processes in order to coordinate local and remote steps necessary to enable cross-site data sharing or feasibility analyses. This includes access to local data repositories, use-and-access-committee decision support, consent filtering, and privacy preserving record-linkage and pseudonymization. The aim is to enable secure and syntactically-, semantically- and process-interoperable data exchange across organizational boundaries. The secure communication infrastructure is funded by the German Federal Ministry of Education and Research within the Medical Informatics structure as DSF Community.
"}');export{u as comp,p as data}; diff --git a/assets/index.html-CKHbb906.js b/assets/index.html-CKHbb906.js new file mode 100644 index 000000000..0fc78e175 --- /dev/null +++ b/assets/index.html-CKHbb906.js @@ -0,0 +1 @@ +import{_ as t}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as r,a as i,o}from"./app-BIWb5uIp.js";const s={};function n(a,e){return o(),r("div",null,e[0]||(e[0]=[i('We take security of the DSF, its process plugins, services and tools we operate very seriously.
We describe the security mechanisms used by the DSF to implement secure communication on the pages Architecture, Security and Allow List.
To ensure a high level of security, you should always install the latest DSF version and use the latest versions of the process plugins. Use the instructions to install the latest version of the DSF or to upgrade on the latest version. The instructions described there implement the security configuration recommended by us.
It is also important that you ensure a secure operating environment in which you verify firewall configurations, keep the operating systems on which the DSF is running up to date and harden it according to the latest state of the art.
We as the DSF development team take security of our software, services and data very seriously. We understand that despite our best efforts, vulnerabilities can exist. To address this, we encourage responsible reporting of any security vulnerabilities discovered in our software and systems.
We kindly ask security researchers and the general public to follow the principles of Coordinated Vulnerability Disclosure (CVD) or Responsible Disclosure when reporting vulnerabilities to us. This approach helps us to mitigate potential risks and protect our users' data effectively.
If you believe you have found a security vulnerability in our system, please email us at dsf-security@hs-heilbronn.de. If you want to use end-to-end-encryption, you can send us mails using s-mime with the certificate chain provided here. We kindly request the following:
Please do not file an issue on a security-related topic and use the e-mail address provided. You can verify the address both in the application repository and at the homepage (this page).
We promise not to initiate legal action against individuals who report vulnerabilities responsibly in accordance with this policy. This includes not suing for accidental access to data or reporting in good faith.
If you have any questions about this policy or security of the Data Sharing Framework, the services and tools we provide, please contact us at dsf-security@hs-heilbronn.de. You can send us encrypted e-mails using s-mime. You can find the certificate chain here.
',19)]))}const c=t(s,[["render",n],["__file","index.html.vue"]]),d=JSON.parse('{"path":"/security/","title":"Security","lang":"en-US","frontmatter":{"title":"Security","icon":"safe","gitInclude":[]},"headers":[{"level":2,"title":"Security vulnerability disclosure policy","slug":"security-vulnerability-disclosure-policy","link":"#security-vulnerability-disclosure-policy","children":[{"level":3,"title":"Our commitment to security","slug":"our-commitment-to-security","link":"#our-commitment-to-security","children":[]},{"level":3,"title":"Responsible disclosure","slug":"responsible-disclosure","link":"#responsible-disclosure","children":[]},{"level":3,"title":"How to Report a Vulnerability","slug":"how-to-report-a-vulnerability","link":"#how-to-report-a-vulnerability","children":[]},{"level":3,"title":"Our promise","slug":"our-promise","link":"#our-promise","children":[]},{"level":3,"title":"Legal Protection","slug":"legal-protection","link":"#legal-protection","children":[]},{"level":3,"title":"Questions?","slug":"questions","link":"#questions","children":[]}]}],"readingTime":{"minutes":1.97,"words":590},"filePathRelative":"security/readme.md","excerpt":"We take security of the DSF, its process plugins, services and tools we operate very seriously.
\\nWe describe the security mechanisms used by the DSF to implement secure communication on the pages Architecture, Security and Allow List.
"}');export{c as comp,d as data}; diff --git a/assets/index.html-CPm74umY.js b/assets/index.html-CPm74umY.js new file mode 100644 index 000000000..d4396a439 --- /dev/null +++ b/assets/index.html-CPm74umY.js @@ -0,0 +1 @@ +import{_ as r}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as i,b as n,d as a,e as o,f as s,a as l,r as d,o as h}from"./app-BIWb5uIp.js";const p={};function m(f,e){const t=d("RouteLink");return h(),i("div",null,[n("p",null,[e[1]||(e[1]=a("Data Sharing Framework 1.x is the new major release of the Data Sharing Framework. Click ")),o(t,{to:"/intro/"},{default:s(()=>e[0]||(e[0]=[a("here")])),_:1}),e[2]||(e[2]=a(" to find more information about the DSF in general."))]),e[3]||(e[3]=l('Important note
This is a major DSF release not compatible with 0.9.x and older version developed at https://github.com/highmed/highmed-dsf.
Data Sharing Framework 1.x is the new major release of the Data Sharing Framework. Click here to find more information about the DSF in general.
\\nImportant note
\\nThis is a major DSF release not compatible with 0.9.x and older version developed at https://github.com/highmed/highmed-dsf.
\\nImportant note
This is a major DSF release not compatible with 0.9.x and older version developed at https://github.com/highmed/highmed-dsf.
Data Sharing Framework 1.x is the new major release of the Data Sharing Framework. Click here to find more information about the DSF in general.
\\nImportant note
\\nThis is a major DSF release not compatible with 0.9.x and older version developed at https://github.com/highmed/highmed-dsf.
\\nWe are thrilled that you consider contributing to our projects. Your contributions, big or small, are greatly valued and play a significant role in the success and improvement of our work. Whether you're a seasoned developer or just starting out, there's a place for you here to make a meaningful impact.
Before you start contributing, we recommend reading our getting started guidelines for detailed information on our processes and standards. This ensures a smooth and productive experience for everyone involved.
Your contributions in any form, are what drives the continuous growth and improvement of this project. Thank you for being a part of our community and for your willingness to contribute!
',5)]))}const g=t(i,[["render",a],["__file","index.html.vue"]]),c=JSON.parse(`{"path":"/stable/contribute/","title":"Contribute","lang":"en-US","frontmatter":{"title":"Contribute","icon":"info","gitInclude":[]},"headers":[{"level":3,"title":"Ways you can contribute:","slug":"ways-you-can-contribute","link":"#ways-you-can-contribute","children":[]}],"readingTime":{"minutes":1.64,"words":491},"filePathRelative":"stable/contribute/readme.md","excerpt":"We are thrilled that you consider contributing to our projects. Your contributions, big or small, are greatly valued and play a significant role in the success and improvement of our work. Whether you're a seasoned developer or just starting out, there's a place for you here to make a meaningful impact.
"}`);export{g as comp,c as data}; diff --git a/assets/index.html-Cndqnr5H.js b/assets/index.html-Cndqnr5H.js new file mode 100644 index 000000000..6a871e1fc --- /dev/null +++ b/assets/index.html-Cndqnr5H.js @@ -0,0 +1 @@ +import{_ as r}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as i,b as n,d as a,e as o,f as s,a as l,r as d,o as h}from"./app-BIWb5uIp.js";const p={};function m(f,e){const t=d("RouteLink");return h(),i("div",null,[n("p",null,[e[1]||(e[1]=a("Data Sharing Framework 1.x is the new major release of the Data Sharing Framework. Click ")),o(t,{to:"/intro/"},{default:s(()=>e[0]||(e[0]=[a("here")])),_:1}),e[2]||(e[2]=a(" to find more information about the DSF in general."))]),e[3]||(e[3]=l('Important note
This is a major DSF release not compatible with 0.9.x and older version developed at https://github.com/highmed/highmed-dsf.
Data Sharing Framework 1.x is the new major release of the Data Sharing Framework. Click here to find more information about the DSF in general.
\\nImportant note
\\nThis is a major DSF release not compatible with 0.9.x and older version developed at https://github.com/highmed/highmed-dsf.
\\nWe are thrilled that you consider contributing to our projects. Your contributions, big or small, are greatly valued and play a significant role in the success and improvement of our work. Whether you're a seasoned developer or just starting out, there's a place for you here to make a meaningful impact.
Before you start contributing, we recommend reading our getting started guidelines for detailed information on our processes and standards. This ensures a smooth and productive experience for everyone involved.
Your contributions in any form, are what drives the continuous growth and improvement of this project. Thank you for being a part of our community and for your willingness to contribute!
',5)]))}const g=t(i,[["render",a],["__file","index.html.vue"]]),c=JSON.parse(`{"path":"/v1.7.0/contribute/","title":"Contribute","lang":"en-US","frontmatter":{"title":"Contribute","icon":"info","gitInclude":[]},"headers":[{"level":3,"title":"Ways you can contribute:","slug":"ways-you-can-contribute","link":"#ways-you-can-contribute","children":[]}],"readingTime":{"minutes":1.64,"words":491},"filePathRelative":"v1.7.0/contribute/readme.md","excerpt":"We are thrilled that you consider contributing to our projects. Your contributions, big or small, are greatly valued and play a significant role in the success and improvement of our work. Whether you're a seasoned developer or just starting out, there's a place for you here to make a meaningful impact.
"}`);export{g as comp,c as data}; diff --git a/assets/index.html-Cz99fyPG.js b/assets/index.html-Cz99fyPG.js new file mode 100644 index 000000000..9bf53d75e --- /dev/null +++ b/assets/index.html-Cz99fyPG.js @@ -0,0 +1 @@ +import{_ as o}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as i,b as t,e as l,f as n,r as u,o as m,d as a}from"./app-BIWb5uIp.js";const s={};function d(b,e){const r=u("RouteLink");return m(),i("div",null,[e[4]||(e[4]=t("h2",{id:"overview",tabindex:"-1"},[t("a",{class:"header-anchor",href:"#overview"},[t("span",null,"Overview")])],-1)),t("ul",null,[t("li",null,[l(r,{to:"/about/learnmore/contact.html"},{default:n(()=>e[0]||(e[0]=[a("Contact & Community Guide")])),_:1})]),t("li",null,[l(r,{to:"/about/learnmore/team.html"},{default:n(()=>e[1]||(e[1]=[a("Contributors")])),_:1})]),t("li",null,[l(r,{to:"/about/learnmore/partners.html"},{default:n(()=>e[2]||(e[2]=[a("Partners")])),_:1})]),t("li",null,[l(r,{to:"/about/learnmore/public.html"},{default:n(()=>e[3]||(e[3]=[a("Public Funding")])),_:1})])])])}const c=o(s,[["render",d],["__file","index.html.vue"]]),p=JSON.parse('{"path":"/about/learnmore/","title":"Learn more","lang":"en-US","frontmatter":{"title":"Learn more","icon":"creative","gitInclude":[]},"headers":[{"level":2,"title":"Overview","slug":"overview","link":"#overview","children":[]}],"readingTime":{"minutes":0.06,"words":17},"filePathRelative":"about/learnmore/README.md","excerpt":"Terminverschiebung
Die Sprechstunde wird aufgrund der FDPG Datenausleitungssprechstunde (DANA) ab dem 20.01.2025 von 14:00-14:50 Uhr stattfinden.
Gemeinsame technische Sprechstunde der DSF-Community und des FDPG+
Klicken Sie hier, um der Sprechstunde beizutreten.
',3)]))}const p=n(s,[["render",a],["__file","index.html.vue"]]),h=JSON.parse('{"path":"/sprechstunde/","title":"Sprechstunde","lang":"en-US","frontmatter":{"title":"Sprechstunde","icon":"guide","gitInclude":[]},"headers":[],"readingTime":{"minutes":0.18,"words":55},"filePathRelative":"sprechstunde/index.md","excerpt":"Terminverschiebung
\\nDie Sprechstunde wird aufgrund der FDPG Datenausleitungssprechstunde (DANA) ab dem 20.01.2025 von 14:00-14:50 Uhr stattfinden.
\\nGemeinsame technische Sprechstunde der DSF-Community und des FDPG+
\\nWe are thrilled that you consider contributing to our projects. Your contributions, big or small, are greatly valued and play a significant role in the success and improvement of our work. Whether you're a seasoned developer or just starting out, there's a place for you here to make a meaningful impact.
Before you start contributing, we recommend reading our getting started guidelines for detailed information on our processes and standards. This ensures a smooth and productive experience for everyone involved.
Your contributions in any form, are what drives the continuous growth and improvement of this project. Thank you for being a part of our community and for your willingness to contribute!
',5)]))}const g=t(i,[["render",a],["__file","index.html.vue"]]),c=JSON.parse(`{"path":"/v1.5.0/contribute/","title":"Contribute","lang":"en-US","frontmatter":{"title":"Contribute","icon":"info","gitInclude":[]},"headers":[{"level":3,"title":"Ways you can contribute:","slug":"ways-you-can-contribute","link":"#ways-you-can-contribute","children":[]}],"readingTime":{"minutes":1.64,"words":491},"filePathRelative":"v1.5.0/contribute/readme.md","excerpt":"We are thrilled that you consider contributing to our projects. Your contributions, big or small, are greatly valued and play a significant role in the success and improvement of our work. Whether you're a seasoned developer or just starting out, there's a place for you here to make a meaningful impact.
"}`);export{g as comp,c as data}; diff --git a/assets/index.html-DQ3Nt6us.js b/assets/index.html-DQ3Nt6us.js new file mode 100644 index 000000000..785ad6b39 --- /dev/null +++ b/assets/index.html-DQ3Nt6us.js @@ -0,0 +1 @@ +import{_ as r}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as i,b as n,d as a,e as o,f as s,a as l,r as d,o as h}from"./app-BIWb5uIp.js";const p={};function m(f,e){const t=d("RouteLink");return h(),i("div",null,[n("p",null,[e[1]||(e[1]=a("Data Sharing Framework 1.x is the new major release of the Data Sharing Framework. Click ")),o(t,{to:"/intro/"},{default:s(()=>e[0]||(e[0]=[a("here")])),_:1}),e[2]||(e[2]=a(" to find more information about the DSF in general."))]),e[3]||(e[3]=l('Important note
This is a major DSF release not compatible with 0.9.x and older version developed at https://github.com/highmed/highmed-dsf.
Data Sharing Framework 1.x is the new major release of the Data Sharing Framework. Click here to find more information about the DSF in general.
\\nImportant note
\\nThis is a major DSF release not compatible with 0.9.x and older version developed at https://github.com/highmed/highmed-dsf.
\\nWe are thrilled that you consider contributing to our projects. Your contributions, big or small, are greatly valued and play a significant role in the success and improvement of our work. Whether you're a seasoned developer or just starting out, there's a place for you here to make a meaningful impact.
Before you start contributing, we recommend reading our getting started guidelines for detailed information on our processes and standards. This ensures a smooth and productive experience for everyone involved.
Your contributions in any form, are what drives the continuous growth and improvement of this project. Thank you for being a part of our community and for your willingness to contribute!
',5)]))}const g=t(i,[["render",a],["__file","index.html.vue"]]),c=JSON.parse(`{"path":"/v1.4.0/contribute/","title":"Contribute","lang":"en-US","frontmatter":{"title":"Contribute","icon":"info","gitInclude":[]},"headers":[{"level":3,"title":"Ways you can contribute:","slug":"ways-you-can-contribute","link":"#ways-you-can-contribute","children":[]}],"readingTime":{"minutes":1.64,"words":491},"filePathRelative":"v1.4.0/contribute/readme.md","excerpt":"We are thrilled that you consider contributing to our projects. Your contributions, big or small, are greatly valued and play a significant role in the success and improvement of our work. Whether you're a seasoned developer or just starting out, there's a place for you here to make a meaningful impact.
"}`);export{g as comp,c as data}; diff --git a/assets/index.html-DSkgDhK5.js b/assets/index.html-DSkgDhK5.js new file mode 100644 index 000000000..fe2ca5acd --- /dev/null +++ b/assets/index.html-DSkgDhK5.js @@ -0,0 +1 @@ +import{_ as i}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as n,a as r,o as a}from"./app-BIWb5uIp.js";const t={};function o(l,e){return a(),n("div",null,e[0]||(e[0]=[r('Important note
This is a major DSF release not compatible with 0.9.x and older version developed at https://github.com/highmed/highmed-dsf.
Data Sharing Framework 1.x is the new major release of the Data Sharing Framework. Click here to find more information about the DSF in general.
\\nImportant note
\\nThis is a major DSF release not compatible with 0.9.x and older version developed at https://github.com/highmed/highmed-dsf.
\\nImportant note
This is a major DSF release not compatible with 0.9.x and older version developed at https://github.com/highmed/highmed-dsf.
Data Sharing Framework 1.x is the new major release of the Data Sharing Framework. Click here to find more information about the DSF in general.
\\nImportant note
\\nThis is a major DSF release not compatible with 0.9.x and older version developed at https://github.com/highmed/highmed-dsf.
\\nGemeinsame technische Sprechstunde der DSF-Community und des FDPG+
Zusammenfassung
Liebe Community,
die Modul 2B Projekte FDPG+ und DSF Community möchten Sie herzlich zu einem Hackathon am 22.10.2024 von 10:00-16:00 Uhr einladen.
Leiten Sie bitte die Einladung an die technischen Kolleg*innen in den DIZ und den DMSt weiter.
Fokus des Hackathons liegt auf der Erprobung der neuen Machbarkeit und des neuen Datenselektion- und -Extraktionstools (TORCH) im Zusammenspiel mit den DSF Data Sharing Prozessen. Ergänzend wird der FHIR Data Evaluator vorgestellt und Updates zum DSF, dem Feasibility Prozess Plugin und dem Allowlist Management präsentiert.
Wie üblich sollen die Tools und Prozesse hands-on auf der Testinfrastruktur erprobt und deployed werden, dazu folgen weitere Informationen kurz vor dem Hackathon.
Vorläufiger Zeitplan:
10:00-12:30 : Vorstellungen und Updates
• Vorstellung Datenselektion und Extraktionstool TORCH – FDPG+
• Vorstellung FHIR Evaluator – FDPG+
• DSF Updates – DSF Community
• Feasibility Prozessplugin Updates – DSF Community & FDPG+
• Allowlist Management Updates – DSF Community
12:30-14:00 : Mittagspause
14:00-16:00 : Hands-On
• Erproben der vorgestellten Tools
• Datenselektion und Extraktion mit Data Sharing
• Support und Fragen zum Deployment und Betrieb
• Wir freuen uns auf Ihre Teilnahme!
Viele Grüße,
Julian Gründner und Maximilian Kurscheidt
für FDPG+ und DSF Community
In diesem Hackathon ist das Aktualisieren der Systeme ein aktiver Bestandteil und bedarf keiner speziellen Vorbereitung. Für das Testen der Data Sharing Prozesse sollten jedoch einige Voraussetzungen bereits vor dem Termin erfüllt sein.
Für den Hackathon werden wir primär die Test-DMS aus Heilbronn verwenden, dafür muss neben der Freigabe zur FDPG auch die Freigabe zum und vom MII-Test-System der HHN vorhanden sein (mii-test.gecko.hs-heilbronn.de).
In diesem Kontext bietet es sich an, auch die Freigaben für weitere Test-DMSen zu beantragen. Weitere Details dazu befinden sich in der Liste der Firewallregeln im Test-Allowlist-Management-Tool (allowlist-test.gecko.hs-heilbronn.de) unter dem Punkt “Download Allowlist”. Wir planen während des Termins auch mit ausgewählten DIZen erste Funktionstests der Test-DMSen durchzuführen.
Wir empfehlen auch sicherzustellen, dass Sie Zugriff auf die Weboberfläche Ihres DSF-FHIR-Servers haben.
Zur Datenausleitung im Kontext des Data Sharing Prozesses werden wir FHIR Bundles mit Testdaten zur Verfügung stellen, die in Form eines FHIR Bundles auf einem FHIR Store gespeichert werden müssen. Dabei kann ein beliebiger FHIR Server (z.B. HAPI FHIR oder Blaze) verwendet werden. Es kann auch der FHIR Server verwendet werden, der z.B. für Feasibility im Testsystem genutzt wird.
Weitere Details zum Data Sharing Prozess sind hier zu finden:
https://github.com/medizininformatik-initiative/mii-process-data-sharing/wiki
Eine Installation kann vor oder während des Hackathons durchgeführt werden.
⚠️
Versionen der unten aufgeführten Tools können sich noch bis zum Hackathon ändern.
Installation oder Update
Falls Sie Fragen zum Hackathon oder der Installation des DSF haben, melden Sie sich gerne über den DSF-Community Zulip-Channel oder dsf-gecko@hs-heilbronn.de.
',37)]))}const h=n(a,[["render",s],["__file","index.html.vue"]]),p=JSON.parse('{"path":"/hackathon/","title":"Hackathon","lang":"en-US","frontmatter":{"title":"Hackathon","icon":"guide","gitInclude":[]},"headers":[{"level":2,"title":"Vorbereitung","slug":"vorbereitung","link":"#vorbereitung","children":[{"level":3,"title":"Netzwerkfreigaben","slug":"netzwerkfreigaben","link":"#netzwerkfreigaben","children":[]},{"level":3,"title":"FHIR-Store","slug":"fhir-store","link":"#fhir-store","children":[]}]},{"level":2,"title":"Installationshinweise","slug":"installationshinweise","link":"#installationshinweise","children":[{"level":3,"title":"DSF","slug":"dsf","link":"#dsf","children":[]},{"level":3,"title":"Prozessplugins","slug":"prozessplugins","link":"#prozessplugins","children":[]},{"level":3,"title":"Weitere Anwendungen","slug":"weitere-anwendungen","link":"#weitere-anwendungen","children":[]}]},{"level":2,"title":"Data Sharing Demodaten","slug":"data-sharing-demodaten","link":"#data-sharing-demodaten","children":[]}],"readingTime":{"minutes":2.21,"words":662},"filePathRelative":"hackathon/index.md","excerpt":"\\nGemeinsame technische Sprechstunde der DSF-Community und des FDPG+
\\nWe are thrilled that you consider contributing to our projects. Your contributions, big or small, are greatly valued and play a significant role in the success and improvement of our work. Whether you're a seasoned developer or just starting out, there's a place for you here to make a meaningful impact.
Before you start contributing, we recommend reading our getting started guidelines for detailed information on our processes and standards. This ensures a smooth and productive experience for everyone involved.
Your contributions in any form, are what drives the continuous growth and improvement of this project. Thank you for being a part of our community and for your willingness to contribute!
',5)]))}const g=t(i,[["render",a],["__file","index.html.vue"]]),c=JSON.parse(`{"path":"/v1.5.1/contribute/","title":"Contribute","lang":"en-US","frontmatter":{"title":"Contribute","icon":"info","gitInclude":[]},"headers":[{"level":3,"title":"Ways you can contribute:","slug":"ways-you-can-contribute","link":"#ways-you-can-contribute","children":[]}],"readingTime":{"minutes":1.64,"words":491},"filePathRelative":"v1.5.1/contribute/readme.md","excerpt":"We are thrilled that you consider contributing to our projects. Your contributions, big or small, are greatly valued and play a significant role in the success and improvement of our work. Whether you're a seasoned developer or just starting out, there's a place for you here to make a meaningful impact.
"}`);export{g as comp,c as data}; diff --git a/assets/index.html-RXe6B_7F.js b/assets/index.html-RXe6B_7F.js new file mode 100644 index 000000000..4cfeddc3f --- /dev/null +++ b/assets/index.html-RXe6B_7F.js @@ -0,0 +1 @@ +import{_ as r}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as i,b as l,d as t,e as n,f as o,r as s,o as d}from"./app-BIWb5uIp.js";const u={};function f(m,e){const a=s("RouteLink");return d(),i("div",null,[l("p",null,[e[1]||(e[1]=t("Data Sharing Framework 0.9.x is an old version of the Data Sharing Framework. Click ")),n(a,{to:"/intro/"},{default:o(()=>e[0]||(e[0]=[t("here")])),_:1}),e[2]||(e[2]=t(" to find more information about the DSF in general."))]),e[8]||(e[8]=l("h2",{id:"overview-v-0-9-x",tabindex:"-1"},[l("a",{class:"header-anchor",href:"#overview-v-0-9-x"},[l("span",null,"Overview (v 0.9.x)")])],-1)),l("ul",null,[l("li",null,[n(a,{to:"/oldstable/generalInformation/"},{default:o(()=>e[3]||(e[3]=[t("General Information")])),_:1})]),l("li",null,[n(a,{to:"/oldstable/code/"},{default:o(()=>e[4]||(e[4]=[t("Code")])),_:1})]),l("li",null,[n(a,{to:"/oldstable/build/"},{default:o(()=>e[5]||(e[5]=[t("Build and Test")])),_:1})]),l("li",null,[n(a,{to:"/oldstable/releases/"},{default:o(()=>e[6]||(e[6]=[t("Releases and Deployment")])),_:1})]),l("li",null,[n(a,{to:"/oldstable/tutorial/"},{default:o(()=>e[7]||(e[7]=[t("DSF Process Plugin Tutorial")])),_:1})])])])}const g=r(u,[["render",f],["__file","index.html.vue"]]),x=JSON.parse('{"path":"/oldstable/","title":"Version 0.9.x","lang":"en-US","frontmatter":{"title":"Version 0.9.x","icon":"guide","gitInclude":[]},"headers":[{"level":2,"title":"Overview (v 0.9.x)","slug":"overview-v-0-9-x","link":"#overview-v-0-9-x","children":[]}],"readingTime":{"minutes":0.17,"words":51},"filePathRelative":"oldstable/README.md","excerpt":"Data Sharing Framework 0.9.x is an old version of the Data Sharing Framework. Click here to find more information about the DSF in general.
\\nThis repository contains exercises to learn how to implement process plugins for the Data Sharing Framework (DSF). The tutorial is divided into several exercises that build on each other. For each exercise, a sample solution is provided in a separate branch.
This tutorial was first executed at the GMDS / TMF 2022 conference. A recording of the opening remarks by H. Hund and R. Wettstein can be found on YouTube. The slides of the opening remarks are available as a PDF download.
This repository contains exercises to learn how to implement process plugins for the Data Sharing Framework (DSF). The tutorial is divided into several exercises that build on each other. For each exercise, a sample solution is provided in a separate branch.
\\nThis tutorial was first executed at the GMDS / TMF 2022 conference. A recording of the opening remarks by H. Hund and R. Wettstein can be found on YouTube. The slides of the opening remarks are available as a PDF download.
Important note
This is a major DSF release not compatible with 0.9.x and older versions developed at https://github.com/highmed/highmed-dsf.
Data Sharing Framework 1.x is the new major release of the Data Sharing Framework. Click here to find more information about the DSF in general.
\\nImportant note
\nThis is a major DSF release not compatible with 0.9.x and older versions developed at https://github.com/highmed/highmed-dsf.
\\nImportant note
This is a major DSF release not compatible with 0.9.x and older versions developed at https://github.com/highmed/highmed-dsf.
Data Sharing Framework 1.x is the new major release of the Data Sharing Framework. Click here to find more information about the DSF in general.
\\nImportant note
\nThis is a major DSF release not compatible with 0.9.x and older versions developed at https://github.com/highmed/highmed-dsf.
\\nSummary
Use case agnostic middleware: DSF is adaptable to any distributed process, leveraging BPMN 2.0 and FHIR R4 for secure, efficient data sharing across various biomedical research scenarios.
Security: DSF prioritizes security through stringent authentication and authorization protocols, ensuring data is accessed and shared only by authorized organizations to maintain data confidentiality and integrity.
Proven in clinical research: Deployed in German university hospitals, DSF's effectiveness and reliability are validated in real-world settings.
Implementation guidance: DSF offers resources on how to implement new process plugins.
The Data Sharing Framework (DSF) is a secure middleware solution designed to facilitate data sharing across different organizations for biomedical research. It utilizes BPMN 2.0 and FHIR R4 standards to support processes such as data extraction, merging, pseudonymization, and provisioning. Funded by the German Federal Ministry of Education and Research as part of the Medical Informatics initiative, the DSF aims to improve data interoperability and security across institutional boundaries.
DSF enables distributed data sharing by providing each participating site with a FHIR endpoint and a business process engine. This setup ensures that data can be securely shared and processed across different sites, facilitating cross-site data sharing and feasibility analyses.
While DSF primarily uses the FHIR R4 standard to ensure high-quality data exchange, it is designed to be open and adaptable to other data formats. This flexibility allows for a wide range of data types to be incorporated into research projects.
Security is a critical component of DSF, which includes robust authentication and authorization protocols. These protocols ensure that data access and sharing are restricted to authorized organizations, maintaining the confidentiality and integrity of the data.
DSF is already deployed and operational in Data Integration Centers at German university hospitals, demonstrating its applicability and reliability in real-world clinical research settings.
Summary
\\nUse case agnostic middleware: DSF is adaptable to any distributed process, leveraging BPMN 2.0 and FHIR R4 for secure, efficient data sharing across various biomedical research scenarios.
\\nSecurity: DSF prioritizes security through stringent authentication and authorization protocols, ensuring data is accessed and shared only by authorized organizations to maintain data confidentiality and integrity.
\\nProven in clinical research: Deployed in German university hospitals, DSF's effectiveness and reliability are validated in real-world settings.
\\nImplementation guidance: DSF offers resources on how to implement new process plugins.
\\nImportant note
This is a major DSF release not compatible with 0.9.x and older versions developed at https://github.com/highmed/highmed-dsf.
Data Sharing Framework 1.0.0 is the new major release of the Data Sharing Framework. Click here to find more information about the DSF in general.
\\nImportant note
\nThis is a major DSF release not compatible with 0.9.x and older versions developed at https://github.com/highmed/highmed-dsf.
\\nMarketplace for process plugins
To install and learn more about each Process Plugin, you can visit the Marketplace here.
/opt/bpe/process
:wget (your jar-file download link)
For example:
wget https://github.com/medizininformatik-initiative/mii-process-data-sharing/releases/download/v1.0.0.1/mii-process-data-sharing-1.0.0.1.jar
sudo chmod 440 (your jar-file name.jar)
+sudo chown root:bpe (your jar-file name.jar)
For example:
sudo chmod 440 mii-process-data-sharing-1.0.0.1.jar
+sudo chown root:bpe mii-process-data-sharing-1.0.0.1.jar
/opt/bpe/docker-compose.yml
docker-compose.yml
Marketplace for process plugins
\\nTo install and learn more about each Process Plugin, you can visit the Marketplace here.
\nYou can find an overview of compatible process plugins below.
You can find an overview of compatible process plugins below.
\\nMarketplace for process plugins
To install and learn more about each Process Plugin, you can visit the Marketplace here.
/opt/bpe/process
:wget (your jar-file download link)
For example:
wget https://github.com/medizininformatik-initiative/mii-process-data-sharing/releases/download/v1.0.0.1/mii-process-data-sharing-1.0.0.1.jar
sudo chmod 440 (your jar-file name.jar)
+sudo chown root:bpe (your jar-file name.jar)
For example:
sudo chmod 440 mii-process-data-sharing-1.0.0.1.jar
+sudo chown root:bpe mii-process-data-sharing-1.0.0.1.jar
/opt/bpe/docker-compose.yml
docker-compose.yml
Marketplace for process plugins
\\nTo install and learn more about each Process Plugin, you can visit the Marketplace here.
\nYou can find an overview of compatible process plugins below.
You can find an overview of compatible process plugins below.
\nYou can find an overview of compatible process plugins below.
You can find an overview of compatible process plugins below.
\\nMarketplace for process plugins
To install and learn more about each Process Plugin, you can visit the Marketplace here.
/opt/bpe/process
:wget (your jar-file download link)
For example:
wget https://github.com/medizininformatik-initiative/mii-process-data-sharing/releases/download/v1.0.0.1/mii-process-data-sharing-1.0.0.1.jar
sudo chmod 440 (your jar-file name.jar)
+sudo chown root:bpe (your jar-file name.jar)
For example:
sudo chmod 440 mii-process-data-sharing-1.0.0.1.jar
+sudo chown root:bpe mii-process-data-sharing-1.0.0.1.jar
/opt/bpe/docker-compose.yml
docker-compose.yml
Marketplace for process plugins
\\nTo install and learn more about each Process Plugin, you can visit the Marketplace here.
\\nMarketplace for process plugins
To install and learn more about each Process Plugin, you can visit the Marketplace here.
/opt/bpe/process
:wget (your jar-file download link)
For example:
wget https://github.com/medizininformatik-initiative/mii-process-data-sharing/releases/download/v1.0.0.1/mii-process-data-sharing-1.0.0.1.jar
sudo chmod 440 (your jar-file name.jar)
+sudo chown root:bpe (your jar-file name.jar)
For example:
sudo chmod 440 mii-process-data-sharing-1.0.0.1.jar
+sudo chown root:bpe mii-process-data-sharing-1.0.0.1.jar
/opt/bpe/docker-compose.yml
docker-compose.yml
Marketplace for process plugins
\\nTo install and learn more about each Process Plugin, you can visit the Marketplace here.
\nYou can find an overview of compatible process plugins below.
You can find an overview of compatible process plugins below.
\\nYou can find an overview of compatible process plugins below.
You can find an overview of compatible process plugins below.
\nYou can find an overview of compatible process plugins below.
You can find an overview of compatible process plugins below.
\\nMember of existing networks
If you are part of an existing network (e.g. the German MII), please install the appropriate DSF version. For the production environment of MII and NUM this is currently DSF 0.9.x.
In the following installation manual we will show you how you can install your own DSF instance to be part of an already existing DSF network.
Unified installation manual
The installation instructions of DSF 0.9.x for different application use cases (e.g. NUM CODEX or HiGHmed) are now combined under one common manual. The specific steps for process installation and configuration are documented at the process plugin documentation pages.
Both VMs need latest docker (>= 24.0.0) and docker compose. For the latest install guide see https://docs.docker.com/engine/install.
sudo apt-get update
+sudo apt-get install apt-transport-https ca-certificates curl gnupg lsb-release
+curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
+echo "deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
+sudo apt-get update
+sudo apt-get install docker-ce docker-ce-cli containerd.io docker-compose-plugin
+sudo systemctl enable docker.service
+sudo systemctl enable containerd.service
The current version of docker compose is installed with the current docker version.
Two Certificates from the GÉANT TCS (via DFN e.V.), D-Trust (via TMF e.V.) or DFN-PKI Global G2 (legacy, no new certificates are issued) are needed:
TLS Web Server Authentication
(DFN PKI Profile: 'Web Server', Common-Name: Your external DSF FHIR Servers FQDN)TLS Web Client Authentication
(DFN PKI Profile: '802.1X Client', Common-Name: Your DSF BPE Servers FQDN)If you use GÉANT TCS certificates, then they are configured by default with the necessary X509v3 Extended Key Usages: TLS Web Server Authentication, TLS Web Client Authentication
.
Here is a quick overview of the expected network setup.
Source | Target | Port | Protocol |
---|---|---|---|
DSF BPE (local) | DSF FHIR (local) | 443 | https, wss |
DSF BPE (local) | DSF FHIR (other DSF communication partners) | 443 | https |
DSF FHIR (local) | DSF FHIR (other DSF communication partners) | 443 | https (HTTP HEAD only) |
DSF BPE (other DSF communication partners) | DSF FHIR (local) | 443 | https |
DSF FHIR (other DSF communication partners) | DSF FHIR (local) | 443 | https (HTTP HEAD only) |
Connections to services that are used by process plugins (e.g. a fTTP, a terminology server, simplifier.net or a local FHIR server) are not listed. Please refer to the respective process plugin documentation pages for more information.
Server Certificate (certificate A)
This certificate will be used as the DSF FHIR servers server certificate (ssl_certificate_file.pem, ssl_certificate_key_file.pem)
ssl_certificate_file.pem
ssl_certificate_key_file.pem
Client Certificate (Certificate B)
This certificate will be used as the DSF BPE servers client certificate (client_certificate.pem, client_certificate_private_key.pem) as well as the DSF FHIR servers client certificate (client_certificate.pem, client_certificate_private_key.pem)
client_certificate.pem
client_certificate_private_key.pem
Add Group/User
Add group and user used by the DSF FHIR java application. Ubuntu compatible commands below:
sudo groupadd --gid 2101 fhir
+sudo useradd --system --no-create-home --uid 2101 --gid 2101 fhir
Download and Extract Config Files
Download and unpack the prepared DSF FHIR server config files and folder structure:
cd /opt
+wget https://dsf.dev/download/dsf_fhir_1_2_0.tar.gz
+sudo tar --same-owner -zxvf dsf_fhir_1_2_0.tar.gz
The tar
command will unpack the config files at /opt/fhir
assuming you changed into the /opt
directory.
Verify that the fhir
system user or group can write into the following folder
/opt/fhir/log
Add certificates and keys
docker-compose.yml
fileL44: - app_client_certificate_private_key.pem.password
+...
+L59: DEV_DSF_FHIR_CLIENT_CERTIFICATE_PRIVATE_KEY_PASSWORD_FILE: /run/secrets/app_client_certificate_private_key.pem.password
+...
+L146: app_client_certificate_private_key.pem.password:
+L147: file: ./secrets/client_certificate_private_key.pem.password
How to chmod / chown
For the example ssl_certificate_file.pem (chmod: 440, chown: fhir:docker) you must:
chmod 440 /opt/fhir/secrets/ssl_certificate_file.pem
fhir
and the group the file belongs to to docker
:chown fhir:docker /opt/fhir/secrets/ssl_certificate_file.pem
Uncomment one of the certificate chain entries in the docker-compose file based on the certificate authority that signed your DSF FHIR server certificate (certificate A). For example, use the following two lines if the server certificate is signed by DFN-Verein Global Issuing CA
L114: ssl_certificate_chain_file.pem:
+L115: file: ./secrets/ssl_certificate_chain_file_DFN-Verein.pem
Modify database passwords
Modify the docker-compose.yml file and set environment variables to the appropriate values
foo.bar.de
-> foo.bar.de:443
DEV_DSF_FHIR_SERVER_ORGANIZATION_IDENTIFIER_VALUE: todo.organization.com
Set your Organization's DSF identifier, aka the shortest FQDN that resolves to the main homepage of the organization, e.g. hs-heilbronn.de
DEV_DSF_FHIR_SERVER_BASE_URL: https://dsf.todo.organization.com/fhir
Set your FHIR servers external FQDN, e.g. foo.bar.de
-> https://foo.bar.de/fhir
DEV_DSF_FHIR_SERVER_ORGANIZATION_THUMBPRINT: f4344032fe77bffb912ff5abfd44da89fe64d355affb8d0f14c9ecb8bdbf92c7fe5f995b1ec0c453e4228b395e331052e4639044df4933d57721de508a84d26f
Set the SHA-512 Hash (lowercase hex) of your client certificate (Certificate B)
Use certtool --fingerprint --hash=sha512 --infile=client_certificate.pem
to generate the hash.
DEV_DSF_FHIR_SERVER_ROLECONFIG: |
(Optional) You can add other client certificates (e.g. personal certificates from admins) to your DSF instance. For additional information, see the FHIR server Access Control page.
For additional environment variables, see FHIR server Configuration Parameters page.
Start the DSF FHIR Server
Start using: docker compose up -d && docker compose logs -f
(Ctrl-C will close log, but not stop container)
Add Group/User
Add group and user used by the DSF BPE java application. Ubuntu compatible commands below:
sudo groupadd --gid 2202 bpe
+sudo useradd --system --no-create-home --uid 2202 --gid 2202 bpe
Download and Extract Config Files
Download and extract prepared DSF BPE server config files and folder structure:
cd /opt
+wget https://dsf.dev/download/dsf_bpe_1_2_0.tar.gz
+sudo tar --same-owner -zxvf dsf_bpe_1_2_0.tar.gz
The tar
command will unpack the config files at /opt/bpe
assuming you changed into the /opt
directory.
Verify that the bpe
system user or group can write into the following folders
/opt/bpe/log
Add certificates and keys
docker-compose.yml
fileL18: - app_client_certificate_private_key.pem.password
+...
+L40: DEV_DSF_BPE_FHIR_CLIENT_CERTIFICATE_PRIVATE_KEY_PASSWORD_FILE: /run/secrets/app_client_certificate_private_key.pem.password
+...
+L97: app_client_certificate_private_key.pem.password:
+L98: file: ./secrets/client_certificate_private_key.pem.password
Modify database passwords
Modify the docker-compose.yml file and set environment variables to the appropriate values
DEV_DSF_BPE_FHIR_SERVER_ORGANIZATION_IDENTIFIER_VALUE: todo.organization.com
Set your Organization's DSF identifier, aka the shortest FQDN that resolves to the main homepage of the organization, e.g. hs-heilbronn.de
DEV_DSF_BPE_FHIR_SERVER_BASE_URL: https://dsf.todo.organization.com/fhir
Set your FHIR servers external FQDN, e.g. foo.bar.de
-> https://foo.bar.de/fhir
For additional environment variables, see the BPE server Configuration Parameters page.
Start the DSF BPE Server (without process plugins)
Start using: docker compose up -d && docker compose logs -f
(Ctrl-C will close log, but not stop container)
Verify DSF BPE Startup
If you need to debug the TLS connection to your DSF FHIR server use for example:docker run -it --rm alpine/openssl s_client your-fhir-server.fqdn:443
The command above should print the server certificate of your DSF FHIR server (certificate A) and end with a message like [...]tlsv13 alert certificate required[...]
By default, we will log both to the console (collected by docker) and to files in the log directory, so you can use docker compose logs -f
in /opt/bpe
and /opt/fhir
to view informational, warning and error logs. If you encounter any error and the reported information is not detailed enough, you can also check the logs in the /opt/fhir/log
and /opt/bpe/log
directories with debugging logs. There, you will also find older log files. If you have any questions and can't resolve them by yourself please always include the latest logs from /opt/fhir/log
and /opt/bpe/log
in your support request.
On a successful BPE start, you should see the following entries in your BPE log:
INFO Grizzly(1) - ClientEndpoint.onOpen(37) | Websocket connected {uri: wss://FHIR_SERVER_FQDN/fhir/ws, session-id: SOME_RANDOM_UUID1}
+INFO Grizzly(1) - ClientEndpoint.onOpen(37) | Websocket connected {uri: wss://FHIR_SERVER_FQDN/fhir/ws, session-id: SOME_RANDOM_UUID2}
Please visit the onboarding website of your network for more information.
Ideas for improvement?
Have you found an error or is something unclear to you? Then please feel free to write to us at gth-gecko@hs-heilbronn.de. Thank you very much!
Member of existing networks
\\nIf you are part of an existing network (e.g. the German MII), please install the appropriate DSF version. For the production environment of MII and NUM this is currently DSF 0.9.x.
\\nIn the following installation manual we will show you how you can install your own DSF instance to be part of an already existing DSF network.
Unified installation manual
The installation instructions of DSF 0.9.x for different application use cases (e.g. NUM CODEX or HiGHmed) are now combined under one common manual. The specific steps for process installation and configuration are documented at the process plugin documentation pages.
Both VMs need latest docker (>= 24.0.0) and docker compose. For the latest install guide see https://docs.docker.com/engine/install.
sudo apt-get update
+sudo apt-get install apt-transport-https ca-certificates curl gnupg lsb-release
+curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
+echo "deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
+sudo apt-get update
+sudo apt-get install docker-ce docker-ce-cli containerd.io docker-compose-plugin
+sudo systemctl enable docker.service
+sudo systemctl enable containerd.service
The current version of docker compose is installed with the current docker version.
Two Certificates from the GÉANT TCS (via DFN e.V.), D-Trust (via TMF e.V.) or DFN-PKI Global G2 (legacy, no new certificates are issued) are needed:
TLS Web Server Authentication
(DFN PKI Profile: 'Web Server', Common-Name: Your external DSF FHIR Servers FQDN)TLS Web Client Authentication
(DFN PKI Profile: '802.1X Client', Common-Name: Your DSF BPE Servers FQDN)If you use GÉANT TCS certificates, then they are configured by default with the necessary X509v3 Extended Key Usages: TLS Web Server Authentication, TLS Web Client Authentication
.
Here is a quick overview of the expected network setup.
Source | Target | Port | Protocol |
---|---|---|---|
DSF BPE (local) | DSF FHIR (local) | 443 | https, wss |
DSF BPE (local) | DSF FHIR (other DSF communication partners) | 443 | https |
DSF FHIR (local) | DSF FHIR (other DSF communication partners) | 443 | https (HTTP HEAD only) |
DSF BPE (other DSF communication partners) | DSF FHIR (local) | 443 | https |
DSF FHIR (other DSF communication partners) | DSF FHIR (local) | 443 | https (HTTP HEAD only) |
Connections to services that are used by process plugins (e.g. a fTTP, a terminology server, simplifier.net or a local FHIR server) are not listed. Please refer to the respective process plugin documentation pages for more information.
Server Certificate (certificate A)
This certificate will be used as the DSF FHIR servers server certificate (ssl_certificate_file.pem, ssl_certificate_key_file.pem)
ssl_certificate_file.pem
ssl_certificate_key_file.pem
Client Certificate (Certificate B)
This certificate will be used as the DSF BPE servers client certificate (client_certificate.pem, client_certificate_private_key.pem) as well as the DSF FHIR servers client certificate (client_certificate.pem, client_certificate_private_key.pem)
client_certificate.pem
client_certificate_private_key.pem
Add Group/User
Add group and user used by the DSF FHIR java application. Ubuntu compatible commands below:
sudo groupadd --gid 2101 fhir
+sudo useradd --system --no-create-home --uid 2101 --gid 2101 fhir
Download and Extract Config Files
Download and unpack the prepared DSF FHIR server config files and folder structure:
cd /opt
+wget https://dsf.dev/download/dsf_fhir_1_5_1.tar.gz
+sudo tar --same-owner -zxvf dsf_fhir_1_5_1.tar.gz
The tar
command will unpack the config files at /opt/fhir
assuming you changed into the /opt
directory.
Verify that the fhir
system user or group can write into the following folder
/opt/fhir/log
Add certificates and keys
docker-compose.yml
fileL39: - app_client_certificate_private_key.pem.password
+...
+L54: DEV_DSF_FHIR_CLIENT_CERTIFICATE_PRIVATE_KEY_PASSWORD_FILE: /run/secrets/app_client_certificate_private_key.pem.password
+...
+L141: app_client_certificate_private_key.pem.password:
+L142: file: ./secrets/client_certificate_private_key.pem.password
How to chmod / chown
For the example ssl_certificate_file.pem (chmod: 440, chown: fhir:docker) you must:
chmod 440 /opt/fhir/secrets/ssl_certificate_file.pem
fhir
and the group the file belongs to to docker
:chown fhir:docker /opt/fhir/secrets/ssl_certificate_file.pem
Uncomment one of the certificate chain entries in the docker-compose file based on the certificate authority that signed your DSF FHIR server certificate (certificate A). For example, use the following two lines if the server certificate is signed by DFN-Verein Global Issuing CA
L114: ssl_certificate_chain_file.pem:
+L115: file: ./secrets/ssl_certificate_chain_file_DFN-Verein.pem
Modify database passwords
Modify the docker-compose.yml file and set environment variables to the appropriate values
foo.bar.de
-> foo.bar.de:443
DEV_DSF_FHIR_SERVER_ORGANIZATION_IDENTIFIER_VALUE: todo.organization.com
Set your Organization's DSF identifier, aka the shortest FQDN that resolves to the main homepage of the organization, e.g. hs-heilbronn.de
DEV_DSF_FHIR_SERVER_BASE_URL: https://dsf.todo.organization.com/fhir
Set your FHIR servers external FQDN, e.g. foo.bar.de
-> https://foo.bar.de/fhir
DEV_DSF_FHIR_SERVER_ORGANIZATION_THUMBPRINT: f4344032fe77bffb912ff5abfd44da89fe64d355affb8d0f14c9ecb8bdbf92c7fe5f995b1ec0c453e4228b395e331052e4639044df4933d57721de508a84d26f
Set the SHA-512 Hash (lowercase hex) of your client certificate (Certificate B)
Use certtool --fingerprint --hash=sha512 --infile=client_certificate.pem
to generate the hash.
DEV_DSF_FHIR_SERVER_ROLECONFIG: |
(Optional) You can add other client certificates (e.g. personal certificates from admins) to your DSF instance. For additional information, see the FHIR server Access Control page.
For additional environment variables, see FHIR server Configuration Parameters page.
Start the DSF FHIR Server
Start using: docker compose up -d && docker compose logs -f
(Ctrl-C will close log, but not stop container)
Add Group/User
Add group and user used by the DSF BPE java application. Ubuntu compatible commands below:
sudo groupadd --gid 2202 bpe
+sudo useradd --system --no-create-home --uid 2202 --gid 2202 bpe
Download and Extract Config Files
Download and extract prepared DSF BPE server config files and folder structure:
cd /opt
+wget https://dsf.dev/download/dsf_bpe_1_5_1.tar.gz
+sudo tar --same-owner -zxvf dsf_bpe_1_5_1.tar.gz
The tar
command will unpack the config files at /opt/bpe
assuming you changed into the /opt
directory.
Verify that the bpe
system user or group can write into the following folders
/opt/bpe/log
Add certificates and keys
docker-compose.yml
fileL13: - app_client_certificate_private_key.pem.password
+...
+L35: DEV_DSF_BPE_FHIR_CLIENT_CERTIFICATE_PRIVATE_KEY_PASSWORD_FILE: /run/secrets/app_client_certificate_private_key.pem.password
+...
+L89: app_client_certificate_private_key.pem.password:
+L90: file: ./secrets/client_certificate_private_key.pem.password
Modify database passwords
Modify the docker-compose.yml file and set environment variables to the appropriate values
DEV_DSF_BPE_FHIR_SERVER_BASE_URL: https://dsf.todo.organization.com/fhir
Set your FHIR servers external FQDN, e.g. foo.bar.de
-> https://foo.bar.de/fhir
For additional environment variables, see the BPE server Configuration Parameters page.
Start the DSF BPE Server (without process plugins)
Start using: docker compose up -d && docker compose logs -f
(Ctrl-C will close log, but not stop container)
Verify DSF BPE Startup
If you need to debug the TLS connection to your DSF FHIR server use for example:docker run -it --rm alpine/openssl s_client your-fhir-server.fqdn:443
The command above should print the server certificate of your DSF FHIR server (certificate A) and end with a message like [...]tlsv13 alert certificate required[...]
By default, we will log both to the console (collected by docker) and to files in the log directory, so you can use docker compose logs -f
in /opt/bpe
and /opt/fhir
to view informational, warning and error logs. If you encounter any error and the reported information is not detailed enough, you can also check the logs in the /opt/fhir/log
and /opt/bpe/log
directories with debugging logs. There, you will also find older log files. If you have any questions and can't resolve them by yourself please always include the latest logs from /opt/fhir/log
and /opt/bpe/log
in your support request.
On a successful BPE start, you should see the following entries in your BPE log:
INFO Grizzly(1) - INFO Grizzly(1) - ClientEndpoint.onOpen(37) | Websocket open, session SOME_RANDOM_UUID1
+INFO Grizzly(1) - INFO Grizzly(1) - ClientEndpoint.onOpen(37) | Websocket open, session SOME_RANDOM_UUID2
Please visit the onboarding website of your network for more information.
Ideas for improvement?
Have you found an error or is something unclear to you? Then please feel free to write to us at gth-gecko@hs-heilbronn.de. Thank you very much!
In the following installation manual we will show you how you can install your own DSF instance to be part of an already existing DSF network.
\\nUnified installation manual
\\nThe installation instructions of DSF 0.9.x for different application use cases (e.g. NUM CODEX or HiGHmed) are now combined under one common manual. The specific steps for process installation and configuration are documented at the process plugin documentation pages.
\\nIn the following installation manual we will show you how you can install your own DSF instance to be part of an already existing DSF network.
Unified installation manual
The installation instructions of DSF 0.9.x for different application use cases (e.g. NUM CODEX or HiGHmed) are now combined under one common manual. The specific steps for process installation and configuration are documented at the process plugin documentation pages.
Both VMs need latest docker (>= 24.0.0) and docker compose. For the latest install guide see https://docs.docker.com/engine/install.
sudo apt-get update
+sudo apt-get install apt-transport-https ca-certificates curl gnupg lsb-release
+curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
+echo "deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
+sudo apt-get update
+sudo apt-get install docker-ce docker-ce-cli containerd.io docker-compose-plugin
+sudo systemctl enable docker.service
+sudo systemctl enable containerd.service
The current version of docker compose is installed with the current docker version.
Two Certificates from the GÉANT TCS (via DFN e.V.), D-Trust (via TMF e.V.) or DFN-PKI Global G2 (legacy, no new certificates are issued) are needed:
TLS Web Server Authentication
(DFN PKI Profile: 'Web Server', Common-Name: Your external DSF FHIR Servers FQDN)TLS Web Client Authentication
(DFN PKI Profile: '802.1X Client', Common-Name: Your DSF BPE Servers FQDN)If you use GÉANT TCS certificates, then they are configured by default with the necessary X509v3 Extended Key Usages: TLS Web Server Authentication, TLS Web Client Authentication
.
Here is a quick overview of the expected network setup.
Source | Target | Port | Protocol |
---|---|---|---|
DSF BPE (local) | DSF FHIR (local) | 443 | https, wss |
DSF BPE (local) | DSF FHIR (other DSF communication partners) | 443 | https |
DSF FHIR (local) | DSF FHIR (other DSF communication partners) | 443 | https (HTTP HEAD only) |
DSF BPE (other DSF communication partners) | DSF FHIR (local) | 443 | https |
DSF FHIR (other DSF communication partners) | DSF FHIR (local) | 443 | https (HTTP HEAD only) |
Connections to services that are used by process plugins (e.g. a fTTP, a terminology server, simplifier.net or a local FHIR server) are not listed. Please refer to the respective process plugin documentation pages for more information.
Server Certificate (certificate A)
This certificate will be used as the DSF FHIR servers server certificate (ssl_certificate_file.pem, ssl_certificate_key_file.pem)
ssl_certificate_file.pem
ssl_certificate_key_file.pem
ssl_certificate_chain_file.pem
Client Certificate (Certificate B)
This certificate will be used as the DSF BPE servers client certificate (client_certificate.pem, client_certificate_private_key.pem) as well as the DSF FHIR servers client certificate (client_certificate.pem, client_certificate_private_key.pem)
client_certificate.pem
client_certificate_private_key.pem
Add Group/User
Add group and user used by the DSF FHIR java application. Ubuntu compatible commands below:
sudo groupadd --gid 2101 fhir
+sudo useradd --system --no-create-home --uid 2101 --gid 2101 fhir
Download and Extract Config Files
Download and unpack the prepared DSF FHIR server config files and folder structure:
cd /opt
+wget https://dsf.dev/download/dsf_fhir_1_7_0.tar.gz
+sudo tar --same-owner -zxvf dsf_fhir_1_7_0.tar.gz
The tar
command will unpack the config files at /opt/fhir
assuming you changed into the /opt
directory.
Verify that the fhir
system user or group can write into the following folder
/opt/fhir/log
Add certificates and keys
docker-compose.yml
fileL34: - app_client_certificate_private_key.pem.password
+...
+L47: DEV_DSF_FHIR_CLIENT_CERTIFICATE_PRIVATE_KEY_PASSWORD_FILE: /run/secrets/app_client_certificate_private_key.pem.password
+...
+L109: app_client_certificate_private_key.pem.password:
+L110: file: ./secrets/client_certificate_private_key.pem.password
How to chmod / chown
For the example ssl_certificate_file.pem (chmod: 440, chown: fhir:docker) you must:
chmod 440 /opt/fhir/secrets/ssl_certificate_file.pem
Set the user the file belongs to to fhir and the group to docker:
chown fhir:docker /opt/fhir/secrets/ssl_certificate_file.pem
Modify database passwords
Modify the docker-compose.yml file and set environment variables to the appropriate values
dsf.todo.organization.com:443
https://foo.bar.de
-> foo.bar.de:443
DEV_DSF_FHIR_SERVER_ORGANIZATION_IDENTIFIER_VALUE: todo.organization.com
Set your Organizations DSF identifier, aka the shortest FQDN that resolves to the main homepage of the organization, e.g. hs-heilbronn.de
DEV_DSF_FHIR_SERVER_BASE_URL: https://dsf.todo.organization.com/fhir
Set your FHIR servers external FQDN, e.g. foo.bar.de
-> https://foo.bar.de/fhir
DEV_DSF_FHIR_SERVER_ORGANIZATION_THUMBPRINT: f4344032fe77bffb912ff5abfd44da89fe64d355affb8d0f14c9ecb8bdbf92c7fe5f995b1ec0c453e4228b395e331052e4639044df4933d57721de508a84d26f
Set the SHA-512 Hash (lowercase hex) of your client certificate (Certificate B)
Use certtool --fingerprint --hash=sha512 --infile=client_certificate.pem
to generate the hash.
DEV_DSF_FHIR_SERVER_ROLECONFIG: |
(Optional) You can add other client certificates (e.g. personal certificates from admins) to your DSF instance. For additional information, see the FHIR server Access Control page.
For additional environment variables, see the FHIR server Configuration Parameters page.
Start the DSF FHIR Server
Start using: docker compose up -d && docker compose logs -f
(Ctrl-C will close log, but not stop container)
Add Group/User
Add group and user used by the DSF BPE java application. Ubuntu compatible commands below:
sudo groupadd --gid 2202 bpe
+sudo useradd --system --no-create-home --uid 2202 --gid 2202 bpe
Download and Extract Config Files
Download and extract prepared DSF BPE server config files and folder structure:
cd /opt
+wget https://dsf.dev/download/dsf_bpe_1_7_0.tar.gz
+sudo tar --same-owner -zxvf dsf_bpe_1_7_0.tar.gz
The tar
command will unpack the config files at /opt/bpe
assuming you changed into the /opt
directory.
Verify that the bpe
system user or group can write into the following folders
/opt/bpe/log
Add certificates and keys
docker-compose.yml
fileL12: - app_client_certificate_private_key.pem.password
+...
+L32: DEV_DSF_BPE_FHIR_CLIENT_CERTIFICATE_PRIVATE_KEY_PASSWORD_FILE: /run/secrets/app_client_certificate_private_key.pem.password
+...
+L83: app_client_certificate_private_key.pem.password:
+L84: file: ./secrets/client_certificate_private_key.pem.password
Modify database passwords
Modify the docker-compose.yml file and set environment variables to the appropriate values
DEV_DSF_BPE_FHIR_SERVER_BASE_URL: https://dsf.todo.organization.com/fhir
Set your FHIR servers external FQDN, e.g. foo.bar.de
-> https://foo.bar.de/fhir
For additional environment variables, see the BPE server Configuration Parameters page.
Start the DSF BPE Server (without process plugins)
Start using: docker compose up -d && docker compose logs -f
(Ctrl-C will close log, but not stop container)
Verify DSF BPE Startup
If you need to debug the TLS connection to your DSF FHIR server use for example:docker run -it --rm alpine/openssl s_client your-fhir-server.fqdn:443
The command above should print the server certificate of your DSF FHIR server (certificate A) and end with a message like [...]tlsv13 alert certificate required[...]
By default, we will log both to the console (collected by docker) and to files in the log directory, so you can use docker compose logs -f
in /opt/bpe
and /opt/fhir
to view informational, warning and error logs. If you encounter any error and the reported information is not detailed enough, you can also check the logs in the /opt/fhir/log
and /opt/bpe/log
directories with debugging logs. There, you will also find older log files. If you have any questions and can't resolve them by yourself please always include the latest logs from /opt/fhir/log
and /opt/bpe/log
in your support request.
On a successful BPE start, you should see the following entries in your BPE log:
INFO Grizzly(1) - ClientEndpoint.onOpen(37) | Websocket open, session SOME_RANDOM_UUID1
+INFO Grizzly(1) - ClientEndpoint.onOpen(37) | Websocket open, session SOME_RANDOM_UUID2
Please visit the onboarding website of your network for more information.
Ideas for improvement?
Have you found an error or is something unclear to you? Then please feel free to write to us at gth-gecko@hs-heilbronn.de. Thank you very much!
In the following installation manual we will show you how you can install your own DSF instance to be part of an already existing DSF network.
\\nUnified installation manual
\\nThe installation instructions of DSF 0.9.x for different application use cases (e.g. NUM CODEX or HiGHmed) are now combined under one common manual. The specific steps for process installation and configuration are documented at the process plugin documentation pages.
\\nIn the following installation manual we will show you how you can install your own DSF instance to be part of an already existing DSF network.
Unified installation manual
The installation instructions of DSF 0.9.x for different application use cases (e.g. NUM CODEX or HiGHmed) are now combined under one common manual. The specific steps for process installation and configuration are documented at the process plugin documentation pages.
Both VMs need latest docker (>= 24.0.0) and docker compose. For the latest install guide see https://docs.docker.com/engine/install.
sudo apt-get update
+sudo apt-get install apt-transport-https ca-certificates curl gnupg lsb-release
+curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
+echo "deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
+sudo apt-get update
+sudo apt-get install docker-ce docker-ce-cli containerd.io docker-compose-plugin
+sudo systemctl enable docker.service
+sudo systemctl enable containerd.service
The current version of docker compose is installed with the current docker version.
Two Certificates from the GÉANT TCS (via DFN e.V.), D-Trust (via TMF e.V.) or DFN-PKI Global G2 (legacy, no new certificates are issued) are needed:
TLS Web Server Authentication
(DFN PKI Profile: 'Web Server', Common-Name: Your external DSF FHIR Servers FQDN)TLS Web Client Authentication
(DFN PKI Profile: '802.1X Client', Common-Name: Your DSF BPE Servers FQDN)If you use GÉANT TCS certificates, then they are configured by default with the necessary X509v3 Extended Key Usages: TLS Web Server Authentication, TLS Web Client Authentication
.
Here is a quick overview of the expected network setup.
Source | Target | Port | Protocol |
---|---|---|---|
DSF BPE (local) | DSF FHIR (local) | 443 | https, wss |
DSF BPE (local) | DSF FHIR (other DSF communication partners) | 443 | https |
DSF FHIR (local) | DSF FHIR (other DSF communication partners) | 443 | https (HTTP HEAD only) |
DSF BPE (other DSF communication partners) | DSF FHIR (local) | 443 | https |
DSF FHIR (other DSF communication partners) | DSF FHIR (local) | 443 | https (HTTP HEAD only) |
Connections to services that are used by process plugins (e.g. a fTTP, a terminology server, simplifier.net or a local FHIR server) are not listed. Please refer to the respective process plugin documentation pages for more information.
Server Certificate (certificate A)
This certificate will be used as the DSF FHIR servers server certificate (ssl_certificate_file.pem, ssl_certificate_key_file.pem)
ssl_certificate_file.pem
ssl_certificate_key_file.pem
Client Certificate (Certificate B)
This certificate will be used as the DSF BPE servers client certificate (client_certificate.pem, client_certificate_private_key.pem) as well as the DSF FHIR servers client certificate (client_certificate.pem, client_certificate_private_key.pem)
client_certificate.pem
client_certificate_private_key.pem
Add Group/User
Add group and user used by the DSF FHIR java application. Ubuntu compatible commands below:
sudo groupadd --gid 2101 fhir
+sudo useradd --system --no-create-home --uid 2101 --gid 2101 fhir
Download and Extract Config Files
Download and unpack the prepared DSF FHIR server config files and folder structure:
cd /opt
+wget https://dsf.dev/download/dsf_fhir_1_5_0.tar.gz
+sudo tar --same-owner -zxvf dsf_fhir_1_5_0.tar.gz
The tar
command will unpack the config files at /opt/fhir
assuming you changed into the /opt
directory.
Verify that the fhir
system user or group can write into the following folder
/opt/fhir/log
Add certificates and keys
docker-compose.yml
fileL39: - app_client_certificate_private_key.pem.password
+...
+L54: DEV_DSF_FHIR_CLIENT_CERTIFICATE_PRIVATE_KEY_PASSWORD_FILE: /run/secrets/app_client_certificate_private_key.pem.password
+...
+L141: app_client_certificate_private_key.pem.password:
+L142: file: ./secrets/client_certificate_private_key.pem.password
How to chmod / chown
For the example ssl_certificate_file.pem (chmod: 440, chown: fhir:docker) you must:
chmod 440 /opt/fhir/secrets/ssl_certificate_file.pem
Set the user the file belongs to to fhir and the group to docker:
chown fhir:docker /opt/fhir/secrets/ssl_certificate_file.pem
Uncomment one of the certificate chain entries in the docker-compose file based on the certificate authority that signed your DSF FHIR server certificate (certificate A). For example use the following two lines if the server certificate is signed by DFN-Verein Global Issuing CA
L114: ssl_certificate_chain_file.pem:
+L115: file: ./secrets/ssl_certificate_chain_file_DFN-Verein.pem
Modify database passwords
Modify the docker-compose.yml file and set environment variables to the appropriate values
foo.bar.de
-> foo.bar.de:443
DEV_DSF_FHIR_SERVER_ORGANIZATION_IDENTIFIER_VALUE: todo.organization.com
Set your Organizations DSF identifier, aka the shortest FQDN that resolves to the main homepage of the organization, e.g. hs-heilbronn.de
DEV_DSF_FHIR_SERVER_BASE_URL: https://dsf.todo.organization.com/fhir
Set your FHIR servers external FQDN, e.g. foo.bar.de
-> https://foo.bar.de/fhir
DEV_DSF_FHIR_SERVER_ORGANIZATION_THUMBPRINT: f4344032fe77bffb912ff5abfd44da89fe64d355affb8d0f14c9ecb8bdbf92c7fe5f995b1ec0c453e4228b395e331052e4639044df4933d57721de508a84d26f
Set the SHA-512 Hash (lowercase hex) of your client certificate (Certificate B)
Use certtool --fingerprint --hash=sha512 --infile=client_certificate.pem
to generate the hash.
DEV_DSF_FHIR_SERVER_ROLECONFIG: |
(Optional) You can add other client certificates (e.g. personal certificates from admins) to your DSF instance. For additional information, see the FHIR server Access Control page.
For additional environment variables, see FHIR server Configuration Parameters page.
Start the DSF FHIR Server
Start using: docker compose up -d && docker compose logs -f
(Ctrl-C will close log, but not stop container)
Add Group/User
Add group and user used by the DSF BPE java application. Ubuntu compatible commands below:
sudo groupadd --gid 2202 bpe
+sudo useradd --system --no-create-home --uid 2202 --gid 2202 bpe
Download and Extract Config Files
Download and extract prepared DSF BPE server config files and folder structure:
cd /opt
+wget https://dsf.dev/download/dsf_bpe_1_5_0.tar.gz
+sudo tar --same-owner -zxvf dsf_bpe_1_5_0.tar.gz
The tar
command will unpack the config files at /opt/bpe
assuming you changed into the /opt
directory.
Verify that the bpe
system user or group can write into the following folders
/opt/bpe/log
Add certificates and keys
docker-compose.yml
fileL13: - app_client_certificate_private_key.pem.password
+...
+L35: DEV_DSF_BPE_FHIR_CLIENT_CERTIFICATE_PRIVATE_KEY_PASSWORD_FILE: /run/secrets/app_client_certificate_private_key.pem.password
+...
+L89: app_client_certificate_private_key.pem.password:
+L90: file: ./secrets/client_certificate_private_key.pem.password
Modify database passwords
Modify the docker-compose.yml file and set environment variables to the appropriate values
DEV_DSF_BPE_FHIR_SERVER_BASE_URL: https://dsf.todo.organization.com/fhir
Set your FHIR servers external FQDN, e.g. foo.bar.de
-> https://foo.bar.de/fhir
For additional environment variables, see the BPE server Configuration Parameters page.
Start the DSF BPE Server (without process plugins)
Start using: docker compose up -d && docker compose logs -f
(Ctrl-C will close log, but not stop container)
Verify DSF BPE Startup
If you need to debug the TLS connection to your DSF FHIR server use for example:docker run -it --rm alpine/openssl s_client your-fhir-server.fqdn:443
The command above should print the server certificate of your DSF FHIR server (certificate A) and end with a message like [...]tlsv13 alert certificate required[...]
By default, we will log both to the console (collected by docker) and to files in the log directory, so you can use docker compose logs -f
in /opt/bpe
and /opt/fhir
to view informational, warning and error logs. If you encounter any error and the reported information is not detailed enough, you can also check the logs in the /opt/fhir/log
and /opt/bpe/log
directories with debugging logs. There, you will also find older log files. If you have any questions and can't resolve them by yourself please always include the latest logs from /opt/fhir/log
and /opt/bpe/log
in your support request.
On a successful BPE start, you should see the following entries in your BPE log:
INFO Grizzly(1) - ClientEndpoint.onOpen(37) | Websocket connected {uri: wss://FHIR_SERVER_FQDN/fhir/ws, session-id: SOME_RANDOM_UUID1}
+INFO Grizzly(1) - ClientEndpoint.onOpen(37) | Websocket connected {uri: wss://FHIR_SERVER_FQDN/fhir/ws, session-id: SOME_RANDOM_UUID2}
Please visit the onboarding website of your network for more information.
Ideas for improvement?
Have you found an error or is something unclear to you? Then please feel free to write to us at gth-gecko@hs-heilbronn.de. Thank you very much!
In the following installation manual we will show you how you can install your own DSF instance to be part of an already existing DSF network.
\\nUnified installation manual
\\nThe installation instructions of DSF 0.9.x for different application use cases (e.g. NUM CODEX or HiGHmed) are now combined under one common manual. The specific steps for process installation and configuration are documented at the process plugin documentation pages.
\\nIn the following installation manual we will show you how you can install your own DSF instance to be part of an already existing DSF network.
Unified installation manual
The installation instructions of DSF 0.9.x for different application use cases (e.g. NUM CODEX or HiGHmed) are now combined under one common manual. The specific steps for process installation and configuration are documented at the process plugin documentation pages.
Both VMs need latest docker (>= 24.0.0) and docker compose. For the latest install guide see https://docs.docker.com/engine/install.
sudo apt-get update
+sudo apt-get install apt-transport-https ca-certificates curl gnupg lsb-release
+curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
+echo "deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
+sudo apt-get update
+sudo apt-get install docker-ce docker-ce-cli containerd.io docker-compose-plugin
+sudo systemctl enable docker.service
+sudo systemctl enable containerd.service
The current version of docker compose is installed with the current docker version.
Two Certificates from the GÉANT TCS (via DFN e.V.), D-Trust (via TMF e.V.) or DFN-PKI Global G2 (legacy, no new certificates are issued) are needed:
TLS Web Server Authentication
(DFN PKI Profile: 'Web Server', Common-Name: Your external DSF FHIR Servers FQDN)TLS Web Client Authentication
(DFN PKI Profile: '802.1X Client', Common-Name: Your DSF BPE Servers FQDN)If you use GÉANT TCS certificates, then they are configured by default with the necessary X509v3 Extended Key Usages: TLS Web Server Authentication, TLS Web Client Authentication
.
Here is a quick overview of the expected network setup.
Source | Target | Port | Protocol |
---|---|---|---|
DSF BPE (local) | DSF FHIR (local) | 443 | https, wss |
DSF BPE (local) | DSF FHIR (other DSF communication partners) | 443 | https |
DSF FHIR (local) | DSF FHIR (other DSF communication partners) | 443 | https (HTTP HEAD only) |
DSF BPE (other DSF communication partners) | DSF FHIR (local) | 443 | https |
DSF FHIR (other DSF communication partners) | DSF FHIR (local) | 443 | https (HTTP HEAD only) |
Connections to services that are used by process plugins (e.g. a fTTP, a terminology server, simplifier.net or a local FHIR server) are not listed. Please refer to the respective process plugin documentation pages for more information.
Server Certificate (certificate A)
This certificate will be used as the DSF FHIR servers server certificate (ssl_certificate_file.pem, ssl_certificate_key_file.pem)
ssl_certificate_file.pem
ssl_certificate_key_file.pem
Client Certificate (Certificate B)
This certificate will be used as the DSF BPE servers client certificate (client_certificate.pem, client_certificate_private_key.pem) as well as the DSF FHIR servers client certificate (client_certificate.pem, client_certificate_private_key.pem)
client_certificate.pem
client_certificate_private_key.pem
Add Group/User
Add group and user used by the DSF FHIR java application. Ubuntu compatible commands below:
sudo groupadd --gid 2101 fhir
+sudo useradd --system --no-create-home --uid 2101 --gid 2101 fhir
Download and Extract Config Files
Download and unpack the prepared DSF FHIR server config files and folder structure:
cd /opt
+wget https://dsf.dev/download/dsf_fhir_1_3_0.tar.gz
+sudo tar --same-owner -zxvf dsf_fhir_1_3_0.tar.gz
The tar
command will unpack the config files at /opt/fhir
assuming you changed into the /opt
directory.
Verify that the fhir
system user or group can write into the following folder
/opt/fhir/log
Add certificates and keys
docker-compose.yml
fileL44: - app_client_certificate_private_key.pem.password
+...
+L59: DEV_DSF_FHIR_CLIENT_CERTIFICATE_PRIVATE_KEY_PASSWORD_FILE: /run/secrets/app_client_certificate_private_key.pem.password
+...
+L146: app_client_certificate_private_key.pem.password:
+L147: file: ./secrets/client_certificate_private_key.pem.password
How to chmod / chown
For the example ssl_certificate_file.pem (chmod: 440, chown: fhir:docker) you must:
chmod 440 /opt/fhir/secrets/ssl_certificate_file.pem
Set the user the file belongs to to fhir and the group to docker:
chown fhir:docker /opt/fhir/secrets/ssl_certificate_file.pem
Uncomment one of the certificate chain entries in the docker-compose file based on the certificate authority that signed your DSF FHIR server certificate (certificate A). For example use the following two lines if the server certificate is signed by DFN-Verein Global Issuing CA
L114: ssl_certificate_chain_file.pem:
+L115: file: ./secrets/ssl_certificate_chain_file_DFN-Verein.pem
Modify database passwords
Modify the docker-compose.yml file and set environment variables to the appropriate values
foo.bar.de
-> foo.bar.de:443
DEV_DSF_FHIR_SERVER_ORGANIZATION_IDENTIFIER_VALUE: todo.organization.com
Set your Organizations DSF identifier, aka the shortest FQDN that resolves to the main homepage of the organization, e.g. hs-heilbronn.de
DEV_DSF_FHIR_SERVER_BASE_URL: https://dsf.todo.organization.com/fhir
Set your FHIR servers external FQDN, e.g. foo.bar.de
-> https://foo.bar.de/fhir
DEV_DSF_FHIR_SERVER_ORGANIZATION_THUMBPRINT: f4344032fe77bffb912ff5abfd44da89fe64d355affb8d0f14c9ecb8bdbf92c7fe5f995b1ec0c453e4228b395e331052e4639044df4933d57721de508a84d26f
Set the SHA-512 Hash (lowercase hex) of your client certificate (Certificate B)
Use certtool --fingerprint --hash=sha512 --infile=client_certificate.pem
to generate the hash.
DEV_DSF_FHIR_SERVER_ROLECONFIG: |
(Optional) You can add other client certificates (e.g. personal certificates from admins) to your DSF instance. For additional information, see the FHIR server Access Control page.
For additional environment variables, see FHIR server Configuration Parameters page.
Start the DSF FHIR Server
Start using: docker compose up -d && docker compose logs -f
(Ctrl-C will close log, but not stop container)
Add Group/User
Add group and user used by the DSF BPE java application. Ubuntu compatible commands below:
sudo groupadd --gid 2202 bpe
+sudo useradd --system --no-create-home --uid 2202 --gid 2202 bpe
Download and Extract Config Files
Download and extract prepared DSF BPE server config files and folder structure:
cd /opt
+wget https://dsf.dev/download/dsf_bpe_1_3_0.tar.gz
+sudo tar --same-owner -zxvf dsf_bpe_1_3_0.tar.gz
The tar
command will unpack the config files at /opt/bpe
assuming you changed into the /opt
directory.
Verify that the bpe
system user or group can write into the following folders
/opt/bpe/log
Add certificates and keys
docker-compose.yml
fileL18: - app_client_certificate_private_key.pem.password
+...
+L40: DEV_DSF_BPE_FHIR_CLIENT_CERTIFICATE_PRIVATE_KEY_PASSWORD_FILE: /run/secrets/app_client_certificate_private_key.pem.password
+...
+L97: app_client_certificate_private_key.pem.password:
+L98: file: ./secrets/client_certificate_private_key.pem.password
Modify database passwords
Modify the docker-compose.yml file and set environment variables to the appropriate values
DEV_DSF_BPE_FHIR_SERVER_ORGANIZATION_IDENTIFIER_VALUE: todo.organization.com
Set your Organizations DSF identifier, aka the shortest FQDN that resolves to the main homepage of the organization, e.g. hs-heilbronn.de
DEV_DSF_BPE_FHIR_SERVER_BASE_URL: https://dsf.todo.organization.com/fhir
Set your FHIR servers external FQDN, e.g. foo.bar.de
-> https://foo.bar.de/fhir
For additional environment variables, see the BPE server Configuration Parameters page.
Start the DSF BPE Server (without process plugins)
Start using: docker compose up -d && docker compose logs -f
(Ctrl-C will close log, but not stop container)
Verify DSF BPE Startup
If you need to debug the TLS connection to your DSF FHIR server use for example:docker run -it --rm alpine/openssl s_client your-fhir-server.fqdn:443
The command above should print the server certificate of your DSF FHIR server (certificate A) and end with a message like [...]tlsv13 alert certificate required[...]
By default, we will log both to the console (collected by docker) and to files in the log directory, so you can use docker compose logs -f
in /opt/bpe
and /opt/fhir
to view informational, warning and error logs. If you encounter any error and the reported information is not detailed enough, you can also check the logs in the /opt/fhir/log
and /opt/bpe/log
directories with debugging logs. There, you will also find older log files. If you have any questions and can't resolve them by yourself please always include the latest logs from /opt/fhir/log
and /opt/bpe/log
in your support request.
On a successful BPE start, you should see the following entries in your BPE log:
INFO Grizzly(1) - ClientEndpoint.onOpen(37) | Websocket connected {uri: wss://FHIR_SERVER_FQDN/fhir/ws, session-id: SOME_RANDOM_UUID1}
+INFO Grizzly(1) - ClientEndpoint.onOpen(37) | Websocket connected {uri: wss://FHIR_SERVER_FQDN/fhir/ws, session-id: SOME_RANDOM_UUID2}
Please visit the onboarding website of your network for more information.
Ideas for improvement?
Have you found an error or is something unclear to you? Then please feel free to write to us at gth-gecko@hs-heilbronn.de. Thank you very much!
In the following installation manual we will show you how you can install your own DSF instance to be part of an already existing DSF network.
\\nUnified installation manual
\\nThe installation instructions of DSF 0.9.x for different application use cases (e.g. NUM CODEX or HiGHmed) are now combined under one common manual. The specific steps for process installation and configuration are documented at the process plugin documentation pages.
\\nIn the following installation manual we will show you how you can install your own DSF instance to be part of an already existing DSF network.
Unified installation manual
The installation instructions of DSF 0.9.x for different application use cases (e.g. NUM CODEX or HiGHmed) are now combined under one common manual. The specific steps for process installation and configuration are documented at the process plugin documentation pages.
Both VMs need latest docker (>= 24.0.0) and docker compose. For the latest install guide see https://docs.docker.com/engine/install.
sudo apt-get update
+sudo apt-get install apt-transport-https ca-certificates curl gnupg lsb-release
+curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
+echo "deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
+sudo apt-get update
+sudo apt-get install docker-ce docker-ce-cli containerd.io docker-compose-plugin
+sudo systemctl enable docker.service
+sudo systemctl enable containerd.service
The current version of docker compose is installed with the current docker version.
Two Certificates from the GÉANT TCS (via DFN e.V.), D-Trust (via TMF e.V.) or DFN-PKI Global G2 (legacy, no new certificates are issued) are needed:
TLS Web Server Authentication
(DFN PKI Profile: 'Web Server', Common-Name: Your external DSF FHIR Servers FQDN)TLS Web Client Authentication
(DFN PKI Profile: '802.1X Client', Common-Name: Your DSF BPE Servers FQDN)If you use GÉANT TCS certificates, then they are configured by default with the necessary X509v3 Extended Key Usages: TLS Web Server Authentication, TLS Web Client Authentication
.
Here is a quick overview of the expected network setup.
Source | Target | Port | Protocol |
---|---|---|---|
DSF BPE (local) | DSF FHIR (local) | 443 | https, wss |
DSF BPE (local) | DSF FHIR (other DSF communication partners) | 443 | https |
DSF FHIR (local) | DSF FHIR (other DSF communication partners) | 443 | https (HTTP HEAD only) |
DSF BPE (other DSF communication partners) | DSF FHIR (local) | 443 | https |
DSF FHIR (other DSF communication partners) | DSF FHIR (local) | 443 | https (HTTP HEAD only) |
Connections to services that are used by process plugins (e.g. a fTTP, a terminology server, simplifier.net or a local FHIR server) are not listed. Please refer to the respective process plugin documentation pages for more information.
Server Certificate (certificate A)
This certificate will be used as the DSF FHIR servers server certificate (ssl_certificate_file.pem, ssl_certificate_key_file.pem)
ssl_certificate_file.pem
ssl_certificate_key_file.pem
Client Certificate (Certificate B)
This certificate will be used as the DSF BPE servers client certificate (client_certificate.pem, client_certificate_private_key.pem) as well as the DSF FHIR servers client certificate (client_certificate.pem, client_certificate_private_key.pem)
client_certificate.pem
client_certificate_private_key.pem
Add Group/User
Add group and user used by the DSF FHIR java application. Ubuntu compatible commands below:
sudo groupadd --gid 2101 fhir
+sudo useradd --system --no-create-home --uid 2101 --gid 2101 fhir
Download and Extract Config Files
Download and unpack the prepared DSF FHIR server config files and folder structure:
cd /opt
+wget https://dsf.dev/download/dsf_fhir_1_5_2.tar.gz
+sudo tar --same-owner -zxvf dsf_fhir_1_5_2.tar.gz
The tar
command will unpack the config files at /opt/fhir
assuming you changed into the /opt
directory.
Verify that the fhir
system user or group can write into the following folder
/opt/fhir/log
Add certificates and keys
docker-compose.yml
fileL39: - app_client_certificate_private_key.pem.password
+...
+L54: DEV_DSF_FHIR_CLIENT_CERTIFICATE_PRIVATE_KEY_PASSWORD_FILE: /run/secrets/app_client_certificate_private_key.pem.password
+...
+L141: app_client_certificate_private_key.pem.password:
+L142: file: ./secrets/client_certificate_private_key.pem.password
How to chmod / chown
For the example ssl_certificate_file.pem (chmod: 440, chown: fhir:docker) you must:
chmod 440 /opt/fhir/secrets/ssl_certificate_file.pem
Set the user the file belongs to to fhir and the group to docker:
chown fhir:docker /opt/fhir/secrets/ssl_certificate_file.pem
Uncomment one of the certificate chain entries in the docker-compose file based on the certificate authority that signed your DSF FHIR server certificate (certificate A). For example use the following two lines if the server certificate is signed by DFN-Verein Global Issuing CA
L114: ssl_certificate_chain_file.pem:
+L115: file: ./secrets/ssl_certificate_chain_file_DFN-Verein.pem
Modify database passwords
Modify the docker-compose.yml file and set environment variables to the appropriate values
foo.bar.de
-> foo.bar.de:443
DEV_DSF_FHIR_SERVER_ORGANIZATION_IDENTIFIER_VALUE: todo.organization.com
Set your Organizations DSF identifier, aka the shortest FQDN that resolves to the main homepage of the organization, e.g. hs-heilbronn.de
DEV_DSF_FHIR_SERVER_BASE_URL: https://dsf.todo.organization.com/fhir
Set your FHIR servers external FQDN, e.g. foo.bar.de
-> https://foo.bar.de/fhir
DEV_DSF_FHIR_SERVER_ORGANIZATION_THUMBPRINT: f4344032fe77bffb912ff5abfd44da89fe64d355affb8d0f14c9ecb8bdbf92c7fe5f995b1ec0c453e4228b395e331052e4639044df4933d57721de508a84d26f
Set the SHA-512 Hash (lowercase hex) of your client certificate (Certificate B)
Use certtool --fingerprint --hash=sha512 --infile=client_certificate.pem
to generate the hash.
DEV_DSF_FHIR_SERVER_ROLECONFIG: |
(Optional) You can add other client certificates (e.g. personal certificates from admins) to your DSF instance. For additional information, see the FHIR server Access Control page.
For additional environment variables, see FHIR server Configuration Parameters page.
Start the DSF FHIR Server
Start using: docker compose up -d && docker compose logs -f
(Ctrl-C will close log, but not stop container)
Add Group/User
Add group and user used by the DSF BPE java application. Ubuntu compatible commands below:
sudo groupadd --gid 2202 bpe
+sudo useradd --system --no-create-home --uid 2202 --gid 2202 bpe
Download and Extract Config Files
Download and extract prepared DSF BPE server config files and folder structure:
cd /opt
+wget https://dsf.dev/download/dsf_bpe_1_5_2.tar.gz
+sudo tar --same-owner -zxvf dsf_bpe_1_5_2.tar.gz
The tar
command will unpack the config files at /opt/bpe
assuming you changed into the /opt
directory.
Verify that the bpe
system user or group can write into the following folders
/opt/bpe/log
Add certificates and keys
docker-compose.yml
fileL13: - app_client_certificate_private_key.pem.password
+...
+L35: DEV_DSF_BPE_FHIR_CLIENT_CERTIFICATE_PRIVATE_KEY_PASSWORD_FILE: /run/secrets/app_client_certificate_private_key.pem.password
+...
+L89: app_client_certificate_private_key.pem.password:
+L90: file: ./secrets/client_certificate_private_key.pem.password
Modify database passwords
Modify the docker-compose.yml file and set environment variables to the appropriate values
DEV_DSF_BPE_FHIR_SERVER_BASE_URL: https://dsf.todo.organization.com/fhir
Set your FHIR servers external FQDN, e.g. foo.bar.de
-> https://foo.bar.de/fhir
For additional environment variables, see the BPE server Configuration Parameters page.
Start the DSF BPE Server (without process plugins)
Start using: docker compose up -d && docker compose logs -f
(Ctrl-C will close log, but not stop container)
Verify DSF BPE Startup
If you need to debug the TLS connection to your DSF FHIR server use for example:docker run -it --rm alpine/openssl s_client your-fhir-server.fqdn:443
The command above should print the server certificate of your DSF FHIR server (certificate A) and end with a message like [...]tlsv13 alert certificate required[...]
By default, we will log both to the console (collected by docker) and to files in the log directory, so you can use docker compose logs -f
in /opt/bpe
and /opt/fhir
to view informational, warning and error logs. If you encounter any error and the reported information is not detailed enough, you can also check the logs in the /opt/fhir/log
and /opt/bpe/log
directories with debugging logs. There, you will also find older log files. If you have any questions and can't resolve them by yourself please always include the latest logs from /opt/fhir/log
and /opt/bpe/log
in your support request.
On a successful BPE start, you should see the following entries in your BPE log:
INFO Grizzly(1) - ClientEndpoint.onOpen(37) | Websocket open, session SOME_RANDOM_UUID1
+INFO Grizzly(1) - ClientEndpoint.onOpen(37) | Websocket open, session SOME_RANDOM_UUID2
Please visit the onboarding website of your network for more information.
Ideas for improvement?
Have you found an error or is something unclear to you? Then please feel free to write to us at gth-gecko@hs-heilbronn.de. Thank you very much!
In the following installation manual we will show you how you can install your own DSF instance to be part of an already existing DSF network.
\\nUnified installation manual
\\nThe installation instructions of DSF 0.9.x for different application use cases (e.g. NUM CODEX or HiGHmed) are now combined under one common manual. The specific steps for process installation and configuration are documented at the process plugin documentation pages.
\\nIn the following installation manual we will show you how you can install your own DSF instance to be part of an already existing DSF network.
Unified installation manual
The installation instructions of DSF 0.9.x for different application use cases (e.g. NUM CODEX or HiGHmed) are now combined under one common manual. The specific steps for process installation and configuration are documented at the process plugin documentation pages.
Both VMs need latest docker (>= 24.0.0) and docker compose. For the latest install guide see https://docs.docker.com/engine/install.
sudo apt-get update
+sudo apt-get install apt-transport-https ca-certificates curl gnupg lsb-release
+curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
+echo "deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
+sudo apt-get update
+sudo apt-get install docker-ce docker-ce-cli containerd.io docker-compose-plugin
+sudo systemctl enable docker.service
+sudo systemctl enable containerd.service
The current version of docker compose is installed with the current docker version.
Two Certificates from the GÉANT TCS (via DFN e.V.), D-Trust (via TMF e.V.) or DFN-PKI Global G2 (legacy, no new certificates are issued) are needed:
TLS Web Server Authentication
(DFN PKI Profile: 'Web Server', Common-Name: Your external DSF FHIR Servers FQDN)TLS Web Client Authentication
(DFN PKI Profile: '802.1X Client', Common-Name: Your DSF BPE Servers FQDN)If you use GÉANT TCS certificates, then they are configured by default with the necessary X509v3 Extended Key Usages: TLS Web Server Authentication, TLS Web Client Authentication
.
Here is a quick overview of the expected network setup.
Source | Target | Port | Protocol |
---|---|---|---|
DSF BPE (local) | DSF FHIR (local) | 443 | https, wss |
DSF BPE (local) | DSF FHIR (other DSF communication partners) | 443 | https |
DSF FHIR (local) | DSF FHIR (other DSF communication partners) | 443 | https (HTTP HEAD only) |
DSF BPE (other DSF communication partners) | DSF FHIR (local) | 443 | https |
DSF FHIR (other DSF communication partners) | DSF FHIR (local) | 443 | https (HTTP HEAD only) |
Connections to services that are used by process plugins (e.g. a fTTP, a terminology server, simplifier.net or a local FHIR server) are not listed. Please refer to the respective process plugin documentation pages for more information.
Server Certificate (certificate A)
This certificate will be used as the DSF FHIR servers server certificate (ssl_certificate_file.pem, ssl_certificate_key_file.pem)
ssl_certificate_file.pem
ssl_certificate_key_file.pem
ssl_certificate_chain_file.pem
Client Certificate (Certificate B)
This certificate will be used as the DSF BPE servers client certificate (client_certificate.pem, client_certificate_private_key.pem) as well as the DSF FHIR servers client certificate (client_certificate.pem, client_certificate_private_key.pem)
client_certificate.pem
client_certificate_private_key.pem
Add Group/User
Add group and user used by the DSF FHIR java application. Ubuntu compatible commands below:
sudo groupadd --gid 2101 fhir
+sudo useradd --system --no-create-home --uid 2101 --gid 2101 fhir
Download and Extract Config Files
Download and unpack the prepared DSF FHIR server config files and folder structure:
cd /opt
+wget https://dsf.dev/download/dsf_fhir_1_7_0.tar.gz
+sudo tar --same-owner -zxvf dsf_fhir_1_7_0.tar.gz
The tar
command will unpack the config files at /opt/fhir
assuming you changed into the /opt
directory.
Verify that the fhir
system user or group can write into the following folder
/opt/fhir/log
Add certificates and keys
docker-compose.yml
fileL34: - app_client_certificate_private_key.pem.password
+...
+L47: DEV_DSF_FHIR_CLIENT_CERTIFICATE_PRIVATE_KEY_PASSWORD_FILE: /run/secrets/app_client_certificate_private_key.pem.password
+...
+L109: app_client_certificate_private_key.pem.password:
+L110: file: ./secrets/client_certificate_private_key.pem.password
How to chmod / chown
For the example ssl_certificate_file.pem (chmod: 440, chown: fhir:docker) you must:
chmod 440 /opt/fhir/secrets/ssl_certificate_file.pem
fhir
and the group the file belongs to to docker
:chown fhir:docker /opt/fhir/secrets/ssl_certificate_file.pem
Modify database passwords
Modify the docker-compose.yml file and set environment variables to the appropriate values
dsf.todo.organization.com:443
https://foo.bar.de
-> foo.bar.de:443
DEV_DSF_FHIR_SERVER_ORGANIZATION_IDENTIFIER_VALUE: todo.organization.com
Set your Organization's DSF identifier, aka the shortest FQDN that resolves to the main homepage of the organization, e.g. hs-heilbronn.de
DEV_DSF_FHIR_SERVER_BASE_URL: https://dsf.todo.organization.com/fhir
Set your FHIR server's external FQDN, e.g. foo.bar.de
-> https://foo.bar.de/fhir
DEV_DSF_FHIR_SERVER_ORGANIZATION_THUMBPRINT: f4344032fe77bffb912ff5abfd44da89fe64d355affb8d0f14c9ecb8bdbf92c7fe5f995b1ec0c453e4228b395e331052e4639044df4933d57721de508a84d26f
Set the SHA-512 Hash (lowercase hex) of your client certificate (Certificate B)
Use certtool --fingerprint --hash=sha512 --infile=client_certificate.pem
to generate the hash.
DEV_DSF_FHIR_SERVER_ROLECONFIG: |
(Optional) You can add other client certificates (e.g. personal certificates from admins) to your DSF instance. For additional information, see the FHIR server Access Control page.
For additional environment variables, see the FHIR server Configuration Parameters page.
Start the DSF FHIR Server
Start using: docker compose up -d && docker compose logs -f
(Ctrl-C will close log, but not stop container)
Add Group/User
Add group and user used by the DSF BPE java application. Ubuntu compatible commands below:
sudo groupadd --gid 2202 bpe
+sudo useradd --system --no-create-home --uid 2202 --gid 2202 bpe
Download and Extract Config Files
Download and extract prepared DSF BPE server config files and folder structure:
cd /opt
+wget https://dsf.dev/download/dsf_bpe_1_7_0.tar.gz
+sudo tar --same-owner -zxvf dsf_bpe_1_7_0.tar.gz
The tar
command will unpack the config files at /opt/bpe
assuming you changed into the /opt
directory.
Verify that the bpe
system user or group can write into the following folders
/opt/bpe/log
Add certificates and keys
docker-compose.yml
fileL12: - app_client_certificate_private_key.pem.password
+...
+L32: DEV_DSF_BPE_FHIR_CLIENT_CERTIFICATE_PRIVATE_KEY_PASSWORD_FILE: /run/secrets/app_client_certificate_private_key.pem.password
+...
+L83: app_client_certificate_private_key.pem.password:
+L84: file: ./secrets/client_certificate_private_key.pem.password
Modify database passwords
Modify the docker-compose.yml file and set environment variables to the appropriate values
DEV_DSF_BPE_FHIR_SERVER_BASE_URL: https://dsf.todo.organization.com/fhir
Set your FHIR server's external FQDN, e.g. foo.bar.de
-> https://foo.bar.de/fhir
For additional environment variables, see the BPE server Configuration Parameters page.
Start the DSF BPE Server (without process plugins)
Start using: docker compose up -d && docker compose logs -f
(Ctrl-C will close log, but not stop container)
Verify DSF BPE Startup
If you need to debug the TLS connection to your DSF FHIR server use for example:docker run -it --rm alpine/openssl s_client your-fhir-server.fqdn:443
The command above should print the server certificate of your DSF FHIR server (certificate A) and end with a message like [...]tlsv13 alert certificate required[...]
By default, we will log both to the console (collected by docker) and to files in the log directory, so you can use docker compose logs -f
in /opt/bpe
and /opt/fhir
to view informational, warning and error logs. If you encounter any error and the reported information is not detailed enough, you can also check the logs in the /opt/fhir/log
and /opt/bpe/log
directories with debugging logs. There, you will also find older log files. If you have any questions and can't resolve them by yourself please always include the latest logs from /opt/fhir/log
and /opt/bpe/log
in your support request.
On a successful BPE start, you should see the following entries in your BPE log:
INFO Grizzly(1) - ClientEndpoint.onOpen(37) | Websocket open, session SOME_RANDOM_UUID1
+INFO Grizzly(1) - ClientEndpoint.onOpen(37) | Websocket open, session SOME_RANDOM_UUID2
Please visit the onboarding website of your network for more information.
Ideas for improvement?
Have you found an error or is something unclear to you? Then please feel free to write to us at gth-gecko@hs-heilbronn.de. Thank you very much!
In the following installation manual we will show you how you can install your own DSF instance to be part of an already existing DSF network.
\\nUnified installation manual
\\nThe installation instructions of DSF 0.9.x for different application use cases (e.g. NUM CODEX or HiGHmed) are now combined under one common manual. The specific steps for process installation and configuration are documented at the process plugin documentation pages.
\\nIn the following installation manual we will show you how you can install your own DSF instance to be part of an already existing DSF network.
Unified installation manual
The installation instructions of DSF 0.9.x for different application use cases (e.g. NUM CODEX or HiGHmed) are now combined under one common manual. The specific steps for process installation and configuration are documented at the process plugin documentation pages.
Both VMs need latest docker (>= 24.0.0) and docker compose. For the latest install guide see https://docs.docker.com/engine/install.
sudo apt-get update
+sudo apt-get install apt-transport-https ca-certificates curl gnupg lsb-release
+curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
+echo "deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
+sudo apt-get update
+sudo apt-get install docker-ce docker-ce-cli containerd.io docker-compose-plugin
+sudo systemctl enable docker.service
+sudo systemctl enable containerd.service
The current version of docker compose is installed with the current docker version.
Two Certificates from the GÉANT TCS (via DFN e.V.), D-Trust (via TMF e.V.) or DFN-PKI Global G2 (legacy, no new certificates are issued) are needed:
TLS Web Server Authentication
(DFN PKI Profile: 'Web Server', Common-Name: Your external DSF FHIR Servers FQDN)TLS Web Client Authentication
(DFN PKI Profile: '802.1X Client', Common-Name: Your DSF BPE Servers FQDN)If you use GÉANT TCS certificates, then they are configured by default with the necessary X509v3 Extended Key Usages: TLS Web Server Authentication, TLS Web Client Authentication
.
Here is a quick overview of the expected network setup.
Source | Target | Port | Protocol |
---|---|---|---|
DSF BPE (local) | DSF FHIR (local) | 443 | https, wss |
DSF BPE (local) | DSF FHIR (other DSF communication partners) | 443 | https |
DSF FHIR (local) | DSF FHIR (other DSF communication partners) | 443 | https (HTTP HEAD only) |
DSF BPE (other DSF communication partners) | DSF FHIR (local) | 443 | https |
DSF FHIR (other DSF communication partners) | DSF FHIR (local) | 443 | https (HTTP HEAD only) |
Connections to services that are used by process plugins (e.g. a fTTP, a terminology server, simplifier.net or a local FHIR server) are not listed. Please refer to the respective process plugin documentation pages for more information.
Server Certificate (certificate A)
This certificate will be used as the DSF FHIR servers server certificate (ssl_certificate_file.pem, ssl_certificate_key_file.pem)
ssl_certificate_file.pem
ssl_certificate_key_file.pem
Client Certificate (Certificate B)
This certificate will be used as the DSF BPE servers client certificate (client_certificate.pem, client_certificate_private_key.pem) as well as the DSF FHIR servers client certificate (client_certificate.pem, client_certificate_private_key.pem)
client_certificate.pem
client_certificate_private_key.pem
Add Group/User
Add group and user used by the DSF FHIR java application. Ubuntu compatible commands below:
sudo groupadd --gid 2101 fhir
+sudo useradd --system --no-create-home --uid 2101 --gid 2101 fhir
Download and Extract Config Files
Download and unpack the prepared DSF FHIR server config files and folder structure:
cd /opt
+wget https://dsf.dev/download/dsf_fhir_1_4_0.tar.gz
+sudo tar --same-owner -zxvf dsf_fhir_1_4_0.tar.gz
The tar
command will unpack the config files at /opt/fhir
assuming you changed into the /opt
directory.
Verify that the fhir
system user or group can write into the following folder
/opt/fhir/log
Add certificates and keys
docker-compose.yml
fileL39: - app_client_certificate_private_key.pem.password
+...
+L54: DEV_DSF_FHIR_CLIENT_CERTIFICATE_PRIVATE_KEY_PASSWORD_FILE: /run/secrets/app_client_certificate_private_key.pem.password
+...
+L141: app_client_certificate_private_key.pem.password:
+L142: file: ./secrets/client_certificate_private_key.pem.password
How to chmod / chown
For the example ssl_certificate_file.pem (chmod: 440, chown: fhir:docker) you must:
chmod 440 /opt/fhir/secrets/ssl_certificate_file.pem
fhir
and the group the file belongs to to docker
:chown fhir:docker /opt/fhir/secrets/ssl_certificate_file.pem
Uncomment one of the certificate chain entries in the docker-compose file based on the certificate authority that signed your DSF FHIR server certificate (certificate A). For example use the following two lines if the server certificate is signed by DFN-Verein Global Issuing CA
L114: ssl_certificate_chain_file.pem:
+L115: file: ./secrets/ssl_certificate_chain_file_DFN-Verein.pem
Modify database passwords
Modify the docker-compose.yml file and set environment variables to the appropriate values
foo.bar.de
-> foo.bar.de:443
DEV_DSF_FHIR_SERVER_ORGANIZATION_IDENTIFIER_VALUE: todo.organization.com
Set your Organization's DSF identifier, aka the shortest FQDN that resolves to the main homepage of the organization, e.g. hs-heilbronn.de
DEV_DSF_FHIR_SERVER_BASE_URL: https://dsf.todo.organization.com/fhir
Set your FHIR server's external FQDN, e.g. foo.bar.de
-> https://foo.bar.de/fhir
DEV_DSF_FHIR_SERVER_ORGANIZATION_THUMBPRINT: f4344032fe77bffb912ff5abfd44da89fe64d355affb8d0f14c9ecb8bdbf92c7fe5f995b1ec0c453e4228b395e331052e4639044df4933d57721de508a84d26f
Set the SHA-512 Hash (lowercase hex) of your client certificate (Certificate B)
Use certtool --fingerprint --hash=sha512 --infile=client_certificate.pem
to generate the hash.
DEV_DSF_FHIR_SERVER_ROLECONFIG: |
(Optional) You can add other client certificates (e.g. personal certificates from admins) to your DSF instance. For additional information, see the FHIR server Access Control page.
For additional environment variables, see FHIR server Configuration Parameters page.
Start the DSF FHIR Server
Start using: docker compose up -d && docker compose logs -f
(Ctrl-C will close log, but not stop container)
Add Group/User
Add group and user used by the DSF BPE java application. Ubuntu compatible commands below:
sudo groupadd --gid 2202 bpe
+sudo useradd --system --no-create-home --uid 2202 --gid 2202 bpe
Download and Extract Config Files
Download and extract prepared DSF BPE server config files and folder structure:
cd /opt
+wget https://dsf.dev/download/dsf_bpe_1_4_0.tar.gz
+sudo tar --same-owner -zxvf dsf_bpe_1_4_0.tar.gz
The tar
command will unpack the config files at /opt/bpe
assuming you changed into the /opt
directory.
Verify that the bpe
system user or group can write into the following folders
/opt/bpe/log
Add certificates and keys
docker-compose.yml
fileL13: - app_client_certificate_private_key.pem.password
+...
+L35: DEV_DSF_BPE_FHIR_CLIENT_CERTIFICATE_PRIVATE_KEY_PASSWORD_FILE: /run/secrets/app_client_certificate_private_key.pem.password
+...
+L89: app_client_certificate_private_key.pem.password:
+L90: file: ./secrets/client_certificate_private_key.pem.password
Modify database passwords
Modify the docker-compose.yml file and set environment variables to the appropriate values
DEV_DSF_BPE_FHIR_SERVER_BASE_URL: https://dsf.todo.organization.com/fhir
Set your FHIR server's external FQDN, e.g. foo.bar.de
-> https://foo.bar.de/fhir
For additional environment variables, see the BPE server Configuration Parameters page.
Start the DSF BPE Server (without process plugins)
Start using: docker compose up -d && docker compose logs -f
(Ctrl-C will close log, but not stop container)
Verify DSF BPE Startup
If you need to debug the TLS connection to your DSF FHIR server use for example:docker run -it --rm alpine/openssl s_client your-fhir-server.fqdn:443
The command above should print the server certificate of your DSF FHIR server (certificate A) and end with a message like [...]tlsv13 alert certificate required[...]
By default, we will log both to the console (collected by docker) and to files in the log directory, so you can use docker compose logs -f
in /opt/bpe
and /opt/fhir
to view informational, warning and error logs. If you encounter any error and the reported information is not detailed enough, you can also check the logs in the /opt/fhir/log
and /opt/bpe/log
directories with debugging logs. There, you will also find older log files. If you have any questions and can't resolve them by yourself please always include the latest logs from /opt/fhir/log
and /opt/bpe/log
in your support request.
On a successful BPE start, you should see the following entries in your BPE log:
INFO Grizzly(1) - ClientEndpoint.onOpen(37) | Websocket connected {uri: wss://FHIR_SERVER_FQDN/fhir/ws, session-id: SOME_RANDOM_UUID1}
+INFO Grizzly(1) - ClientEndpoint.onOpen(37) | Websocket connected {uri: wss://FHIR_SERVER_FQDN/fhir/ws, session-id: SOME_RANDOM_UUID2}
Please visit the onboarding website of your network for more information.
Ideas for improvement?
Have you found an error or is something unclear to you? Then please feel free to write to us at gth-gecko@hs-heilbronn.de. Thank you very much!
In the following installation manual we will show you how you can install your own DSF instance to be part of an already existing DSF network.
\\nUnified installation manual
\\nThe installation instructions of DSF 0.9.x for different application use cases (e.g. NUM CODEX or HiGHmed) are now combined under one common manual. The specific steps for process installation and configuration are documented at the process plugin documentation pages.
\\nIn the following installation manual we will show you how you can install your own DSF instance to be part of an already existing DSF network.
Unified installation manual
The installation instructions of DSF 0.9.x for different application use cases (e.g. NUM CODEX or HiGHmed) are now combined under one common manual. The specific steps for process installation and configuration are documented at the process plugin documentation pages.
Both VMs need latest docker (>= 24.0.0) and docker compose. For the latest install guide see https://docs.docker.com/engine/install.
sudo apt-get update
+sudo apt-get install apt-transport-https ca-certificates curl gnupg lsb-release
+curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
+echo "deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
+sudo apt-get update
+sudo apt-get install docker-ce docker-ce-cli containerd.io docker-compose-plugin
+sudo systemctl enable docker.service
+sudo systemctl enable containerd.service
The current version of docker compose is installed with the current docker version.
Two Certificates from the GÉANT TCS (via DFN e.V.), D-Trust (via TMF e.V.) or DFN-PKI Global G2 (legacy, no new certificates are issued) are needed:
TLS Web Server Authentication
(DFN PKI Profile: 'Web Server', Common-Name: Your external DSF FHIR Servers FQDN)TLS Web Client Authentication
(DFN PKI Profile: '802.1X Client', Common-Name: Your DSF BPE Servers FQDN)If you use GÉANT TCS certificates, then they are configured by default with the necessary X509v3 Extended Key Usages: TLS Web Server Authentication, TLS Web Client Authentication
.
Here is a quick overview of the expected network setup.
Source | Target | Port | Protocol |
---|---|---|---|
DSF BPE (local) | DSF FHIR (local) | 443 | https, wss |
DSF BPE (local) | DSF FHIR (other DSF communication partners) | 443 | https |
DSF FHIR (local) | DSF FHIR (other DSF communication partners) | 443 | https (HTTP HEAD only) |
DSF BPE (other DSF communication partners) | DSF FHIR (local) | 443 | https |
DSF FHIR (other DSF communication partners) | DSF FHIR (local) | 443 | https (HTTP HEAD only) |
Connections to services that are used by process plugins (e.g. a fTTP, a terminology server, simplifier.net or a local FHIR server) are not listed. Please refer to the respective process plugin documentation pages for more information.
Server Certificate (certificate A)
This certificate will be used as the DSF FHIR servers server certificate (ssl_certificate_file.pem, ssl_certificate_key_file.pem)
ssl_certificate_file.pem
ssl_certificate_key_file.pem
Client Certificate (Certificate B)
This certificate will be used as the DSF BPE servers client certificate (client_certificate.pem, client_certificate_private_key.pem) as well as the DSF FHIR servers client certificate (client_certificate.pem, client_certificate_private_key.pem)
client_certificate.pem
client_certificate_private_key.pem
Add Group/User
Add group and user used by the DSF FHIR java application. Ubuntu compatible commands below:
sudo groupadd --gid 2101 fhir
+sudo useradd --system --no-create-home --uid 2101 --gid 2101 fhir
Download and Extract Config Files
Download and unpack the prepared DSF FHIR server config files and folder structure:
cd /opt
+wget https://dsf.dev/download/dsf_fhir_1_6_0.tar.gz
+sudo tar --same-owner -zxvf dsf_fhir_1_6_0.tar.gz
The tar
command will unpack the config files at /opt/fhir
assuming you changed into the /opt
directory.
Verify that the fhir
system user or group can write into the following folder
/opt/fhir/log
Add certificates and keys
docker-compose.yml
fileL39: - app_client_certificate_private_key.pem.password
+...
+L54: DEV_DSF_FHIR_CLIENT_CERTIFICATE_PRIVATE_KEY_PASSWORD_FILE: /run/secrets/app_client_certificate_private_key.pem.password
+...
+L141: app_client_certificate_private_key.pem.password:
+L142: file: ./secrets/client_certificate_private_key.pem.password
How to chmod / chown
For the example ssl_certificate_file.pem (chmod: 440, chown: fhir:docker) you must:
chmod 440 /opt/fhir/secrets/ssl_certificate_file.pem
fhir
and the group the file belongs to to docker
:chown fhir:docker /opt/fhir/secrets/ssl_certificate_file.pem
Uncomment one of the certificate chain entries in the docker-compose file based on the certificate authority that signed your DSF FHIR server certificate (certificate A). For example use the following two lines if the server certificate is signed by DFN-Verein Global Issuing CA
L114: ssl_certificate_chain_file.pem:
+L115: file: ./secrets/ssl_certificate_chain_file_DFN-Verein.pem
Modify database passwords
Modify the docker-compose.yml file and set environment variables to the appropriate values
foo.bar.de
-> foo.bar.de:443
DEV_DSF_FHIR_SERVER_ORGANIZATION_IDENTIFIER_VALUE: todo.organization.com
Set your Organization's DSF identifier, aka the shortest FQDN that resolves to the main homepage of the organization, e.g. hs-heilbronn.de
DEV_DSF_FHIR_SERVER_BASE_URL: https://dsf.todo.organization.com/fhir
Set your FHIR server's external FQDN, e.g. foo.bar.de
-> https://foo.bar.de/fhir
DEV_DSF_FHIR_SERVER_ORGANIZATION_THUMBPRINT: f4344032fe77bffb912ff5abfd44da89fe64d355affb8d0f14c9ecb8bdbf92c7fe5f995b1ec0c453e4228b395e331052e4639044df4933d57721de508a84d26f
Set the SHA-512 Hash (lowercase hex) of your client certificate (Certificate B)
Use certtool --fingerprint --hash=sha512 --infile=client_certificate.pem
to generate the hash.
DEV_DSF_FHIR_SERVER_ROLECONFIG: |
(Optional) You can add other client certificates (e.g. personal certificates from admins) to your DSF instance. For additional information, see the FHIR server Access Control page.
For additional environment variables, see FHIR server Configuration Parameters page.
Start the DSF FHIR Server
Start using: docker compose up -d && docker compose logs -f
(Ctrl-C will close log, but not stop container)
Add Group/User
Add group and user used by the DSF BPE java application. Ubuntu compatible commands below:
sudo groupadd --gid 2202 bpe
+sudo useradd --system --no-create-home --uid 2202 --gid 2202 bpe
Download and Extract Config Files
Download and extract prepared DSF BPE server config files and folder structure:
cd /opt
+wget https://dsf.dev/download/dsf_bpe_1_6_0.tar.gz
+sudo tar --same-owner -zxvf dsf_bpe_1_6_0.tar.gz
The tar
command will unpack the config files at /opt/bpe
assuming you changed into the /opt
directory.
Verify that the bpe
system user or group can write into the following folders
/opt/bpe/log
Add certificates and keys
docker-compose.yml
fileL13: - app_client_certificate_private_key.pem.password
+...
+L35: DEV_DSF_BPE_FHIR_CLIENT_CERTIFICATE_PRIVATE_KEY_PASSWORD_FILE: /run/secrets/app_client_certificate_private_key.pem.password
+...
+L89: app_client_certificate_private_key.pem.password:
+L90: file: ./secrets/client_certificate_private_key.pem.password
Modify database passwords
Modify the docker-compose.yml file and set environment variables to the appropriate values
DEV_DSF_BPE_FHIR_SERVER_BASE_URL: https://dsf.todo.organization.com/fhir
Set your FHIR server's external FQDN, e.g. foo.bar.de
-> https://foo.bar.de/fhir
For additional environment variables, see the BPE server Configuration Parameters page.
Start the DSF BPE Server (without process plugins)
Start using: docker compose up -d && docker compose logs -f
(Ctrl-C will close log, but not stop container)
Verify DSF BPE Startup
If you need to debug the TLS connection to your DSF FHIR server use for example:docker run -it --rm alpine/openssl s_client your-fhir-server.fqdn:443
The command above should print the server certificate of your DSF FHIR server (certificate A) and end with a message like [...]tlsv13 alert certificate required[...]
By default, we will log both to the console (collected by docker) and to files in the log directory, so you can use docker compose logs -f
in /opt/bpe
and /opt/fhir
to view informational, warning and error logs. If you encounter any error and the reported information is not detailed enough, you can also check the logs in the /opt/fhir/log
and /opt/bpe/log
directories with debugging logs. There, you will also find older log files. If you have any questions and can't resolve them by yourself please always include the latest logs from /opt/fhir/log
and /opt/bpe/log
in your support request.
On a successful BPE start, you should see the following entries in your BPE log:
INFO Grizzly(1) - ClientEndpoint.onOpen(37) | Websocket open, session SOME_RANDOM_UUID1
+INFO Grizzly(1) - ClientEndpoint.onOpen(37) | Websocket open, session SOME_RANDOM_UUID2
Please visit the onboarding website of your network for more information.
Ideas for improvement?
Have you found an error or is something unclear to you? Then please feel free to write to us at gth-gecko@hs-heilbronn.de. Thank you very much!
In the following installation manual we will show you how you can install your own DSF instance to be part of an already existing DSF network.
\\nUnified installation manual
\\nThe installation instructions of DSF 0.9.x for different application use cases (e.g. NUM CODEX or HiGHmed) are now combined under one common manual. The specific steps for process installation and configuration are documented at the process plugin documentation pages.
\\nIn the following installation manual we will show you how you can install your own DSF instance to be part of an already existing DSF network.
Unified installation manual
The installation instructions of DSF 0.9.x for different application use cases (e.g. NUM CODEX or HiGHmed) are now combined under one common manual. The specific steps for process installation and configuration are documented at the process plugin documentation pages.
Both VMs need latest docker (>= 24.0.0) and docker compose. For the latest install guide see https://docs.docker.com/engine/install.
sudo apt-get update
+sudo apt-get install apt-transport-https ca-certificates curl gnupg lsb-release
+curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
+echo "deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
+sudo apt-get update
+sudo apt-get install docker-ce docker-ce-cli containerd.io docker-compose-plugin
+sudo systemctl enable docker.service
+sudo systemctl enable containerd.service
The current version of docker compose is installed with the current docker version.
Two Certificates from the GÉANT TCS (via DFN e.V.), D-Trust (via TMF e.V.) or DFN-PKI Global G2 (legacy, no new certificates are issued) are needed:
TLS Web Server Authentication
(DFN PKI Profile: 'Web Server', Common-Name: Your external DSF FHIR Servers FQDN)TLS Web Client Authentication
(DFN PKI Profile: '802.1X Client', Common-Name: Your DSF BPE Servers FQDN)If you use GÉANT TCS certificates, then they are configured by default with the necessary X509v3 Extended Key Usages: TLS Web Server Authentication, TLS Web Client Authentication
.
Here is a quick overview of the expected network setup.
Source | Target | Port | Protocol |
---|---|---|---|
DSF BPE (local) | DSF FHIR (local) | 443 | https, wss |
DSF BPE (local) | DSF FHIR (other DSF communication partners) | 443 | https |
DSF FHIR (local) | DSF FHIR (other DSF communication partners) | 443 | https (HTTP HEAD only) |
DSF BPE (other DSF communication partners) | DSF FHIR (local) | 443 | https |
DSF FHIR (other DSF communication partners) | DSF FHIR (local) | 443 | https (HTTP HEAD only) |
Connections to services that are used by process plugins (e.g. a fTTP, a terminology server, simplifier.net or a local FHIR server) are not listed. Please refer to the respective process plugin documentation pages for more information.
Server Certificate (certificate A)
This certificate will be used as the DSF FHIR servers server certificate (ssl_certificate_file.pem, ssl_certificate_key_file.pem)
ssl_certificate_file.pem
ssl_certificate_key_file.pem
Client Certificate (Certificate B)
This certificate will be used as the DSF BPE servers client certificate (client_certificate.pem, client_certificate_private_key.pem) as well as the DSF FHIR servers client certificate (client_certificate.pem, client_certificate_private_key.pem)
client_certificate.pem
client_certificate_private_key.pem
Add Group/User
Add group and user used by the DSF FHIR java application. Ubuntu compatible commands below:
sudo groupadd --gid 2101 fhir
+sudo useradd --system --no-create-home --uid 2101 --gid 2101 fhir
Download and Extract Config Files
Download and unpack the prepared DSF FHIR server config files and folder structure:
cd /opt
+wget https://dsf.dev/download/dsf_fhir_1_3_2.tar.gz
+sudo tar --same-owner -zxvf dsf_fhir_1_3_2.tar.gz
The tar
command will unpack the config files at /opt/fhir
assuming you changed into the /opt
directory.
Verify that the fhir
system user or group can write into the following folder
/opt/fhir/log
Add certificates and keys
docker-compose.yml
fileL39: - app_client_certificate_private_key.pem.password
+...
+L54: DEV_DSF_FHIR_CLIENT_CERTIFICATE_PRIVATE_KEY_PASSWORD_FILE: /run/secrets/app_client_certificate_private_key.pem.password
+...
+L141: app_client_certificate_private_key.pem.password:
+L142: file: ./secrets/client_certificate_private_key.pem.password
How to chmod / chown
For the example ssl_certificate_file.pem (chmod: 440, chown: fhir:docker) you must:
chmod 440 /opt/fhir/secrets/ssl_certificate_file.pem
fhir
and the group the file belongs to to docker
:chown fhir:docker /opt/fhir/secrets/ssl_certificate_file.pem
Uncomment one of the certificate chain entries in the docker-compose file based on the certificate authority that signed your DSF FHIR server certificate (certificate A). For example, use the following two lines if the server certificate is signed by DFN-Verein Global Issuing CA
L114: ssl_certificate_chain_file.pem:
+L115: file: ./secrets/ssl_certificate_chain_file_DFN-Verein.pem
Modify database passwords
Modify the docker-compose.yml file and set environment variables to the appropriate values
foo.bar.de
-> foo.bar.de:443
DEV_DSF_FHIR_SERVER_ORGANIZATION_IDENTIFIER_VALUE: todo.organization.com
Set your Organizations DSF identifier, aka the shortest FQDN that resolves to the main homepage of the organization, e.g. hs-heilbronn.de
DEV_DSF_FHIR_SERVER_BASE_URL: https://dsf.todo.organization.com/fhir
Set your FHIR servers external FQDN, e.g. foo.bar.de
-> https://foo.bar.de/fhir
DEV_DSF_FHIR_SERVER_ORGANIZATION_THUMBPRINT: f4344032fe77bffb912ff5abfd44da89fe64d355affb8d0f14c9ecb8bdbf92c7fe5f995b1ec0c453e4228b395e331052e4639044df4933d57721de508a84d26f
Set the SHA-512 Hash (lowercase hex) of your client certificate (Certificate B)
Use certtool --fingerprint --hash=sha512 --infile=client_certificate.pem
to generate the hash.
DEV_DSF_FHIR_SERVER_ROLECONFIG: |
(Optional) You can add other client certificates (e.g. personal certificates from admins) to your DSF instance. For additional information, see the FHIR server Access Control page.
For additional environment variables, see FHIR server Configuration Parameters page.
Start the DSF FHIR Server
Start using: docker compose up -d && docker compose logs -f
(Ctrl-C will close log, but not stop container)
Add Group/User
Add group and user used by the DSF BPE java application. Ubuntu compatible commands below:
sudo groupadd --gid 2202 bpe
+sudo useradd --system --no-create-home --uid 2202 --gid 2202 bpe
Download and Extract Config Files
Download and extract prepared DSF BPE server config files and folder structure:
cd /opt
+wget https://dsf.dev/download/dsf_bpe_1_3_2.tar.gz
+sudo tar --same-owner -zxvf dsf_bpe_1_3_2.tar.gz
The tar
command will unpack the config files at /opt/bpe
assuming you changed into the /opt
directory.
Verify that the bpe
system user or group can write into the following folders
/opt/bpe/log
Add certificates and keys
docker-compose.yml
fileL13: - app_client_certificate_private_key.pem.password
+...
+L35: DEV_DSF_BPE_FHIR_CLIENT_CERTIFICATE_PRIVATE_KEY_PASSWORD_FILE: /run/secrets/app_client_certificate_private_key.pem.password
+...
+L89: app_client_certificate_private_key.pem.password:
+L90: file: ./secrets/client_certificate_private_key.pem.password
Modify database passwords
Modify the docker-compose.yml file and set environment variables to the appropriate values
DEV_DSF_BPE_FHIR_SERVER_BASE_URL: https://dsf.todo.organization.com/fhir
Set your FHIR servers external FQDN, e.g. foo.bar.de
-> https://foo.bar.de/fhir
For additional environment variables, see the BPE server Configuration Parameters page.
Start the DSF BPE Server (without process plugins)
Start using: docker compose up -d && docker compose logs -f
(Ctrl-C will close log, but not stop container)
Verify DSF BPE Startup
If you need to debug the TLS connection to your DSF FHIR server use for example:docker run -it --rm alpine/openssl s_client your-fhir-server.fqdn:443
The command above should print the server certificate of your DSF FHIR server (certificate A) and end with a message like [...]tlsv13 alert certificate required[...]
By default, we will log both to the console (collected by docker) and to files in the log directory, so you can use docker compose logs -f
in /opt/bpe
and /opt/fhir
to view informational, warning and error logs. If you encounter any error and the reported information is not detailed enough, you can also check the logs in the /opt/fhir/log
and /opt/bpe/log
directories with debugging logs. There, you will also find older log files. If you have any questions and can't resolve them by yourself please always include the latest logs from /opt/fhir/log
and /opt/bpe/log
in your support request.
On a successful BPE start, you should see the following entries in your BPE log:
INFO Grizzly(1) - ClientEndpoint.onOpen(37) | Websocket connected {uri: wss://FHIR_SERVER_FQDN/fhir/ws, session-id: SOME_RANDOM_UUID1}
+INFO Grizzly(1) - ClientEndpoint.onOpen(37) | Websocket connected {uri: wss://FHIR_SERVER_FQDN/fhir/ws, session-id: SOME_RANDOM_UUID2}
Please visit the onboarding website of your network for more information.
Ideas for improvement?
Have you found an error or is something unclear to you? Then please feel free to write to us at gth-gecko@hs-heilbronn.de. Thank you very much!
In the following installation manual we will show you how you can install your own DSF instance to be part of an already existing DSF network.
\\nUnified installation manual
\\nThe installation instructions of DSF 0.9.x for different application use cases (e.g. NUM CODEX or HiGHmed) are now combined under one common manual. The specific steps for process installation and configuration are documented at the process plugin documentation pages.
\\nIn the following installation manual we will show you how you can install your own DSF instance to be part of an already existing DSF network.
Unified installation manual
The installation instructions of DSF 0.9.x for different application use cases (e.g. NUM CODEX or HiGHmed) are now combined under one common manual. The specific steps for process installation and configuration are documented at the process plugin documentation pages.
Both VMs need latest docker (>= 24.0.0) and docker compose. For the latest install guide see https://docs.docker.com/engine/install.
sudo apt-get update
+sudo apt-get install apt-transport-https ca-certificates curl gnupg lsb-release
+curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
+echo "deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
+sudo apt-get update
+sudo apt-get install docker-ce docker-ce-cli containerd.io docker-compose-plugin
+sudo systemctl enable docker.service
+sudo systemctl enable containerd.service
The current version of docker compose is installed with the current docker version.
Two Certificates from the GÉANT TCS (via DFN e.V.), D-Trust (via TMF e.V.) or DFN-PKI Global G2 (legacy, no new certificates are issued) are needed:
TLS Web Server Authentication
(DFN PKI Profile: 'Web Server', Common-Name: Your external DSF FHIR Servers FQDN)TLS Web Client Authentication
(DFN PKI Profile: '802.1X Client', Common-Name: Your DSF BPE Servers FQDN)If you use GÉANT TCS certificates, then they are configured by default with the necessary X509v3 Extended Key Usages: TLS Web Server Authentication, TLS Web Client Authentication
.
Here is a quick overview of the expected network setup.
Source | Target | Port | Protocol |
---|---|---|---|
DSF BPE (local) | DSF FHIR (local) | 443 | https, wss |
DSF BPE (local) | DSF FHIR (other DSF communication partners) | 443 | https |
DSF FHIR (local) | DSF FHIR (other DSF communication partners) | 443 | https (HTTP HEAD only) |
DSF BPE (other DSF communication partners) | DSF FHIR (local) | 443 | https |
DSF FHIR (other DSF communication partners) | DSF FHIR (local) | 443 | https (HTTP HEAD only) |
Connections to services that are used by process plugins (e.g. a fTTP, a terminology server, simplifier.net or a local FHIR server) are not listed. Please refer to the respective process plugin documentation pages for more information.
Server Certificate (certificate A)
This certificate will be used as the DSF FHIR servers server certificate (ssl_certificate_file.pem, ssl_certificate_key_file.pem)
ssl_certificate_file.pem
ssl_certificate_key_file.pem
Client Certificate (Certificate B)
This certificate will be used as the DSF BPE servers client certificate (client_certificate.pem, client_certificate_private_key.pem) as well as the DSF FHIR servers client certificate (client_certificate.pem, client_certificate_private_key.pem)
client_certificate.pem
client_certificate_private_key.pem
Add Group/User
Add group and user used by the DSF FHIR java application. Ubuntu compatible commands below:
sudo groupadd --gid 2101 fhir
+sudo useradd --system --no-create-home --uid 2101 --gid 2101 fhir
Download and Extract Config Files
Download and unpack the prepared DSF FHIR server config files and folder structure:
cd /opt
+wget https://dsf.dev/download/dsf_fhir_1_3_1.tar.gz
+sudo tar --same-owner -zxvf dsf_fhir_1_3_1.tar.gz
The tar
command will unpack the config files at /opt/fhir
assuming you changed into the /opt
directory.
Verify that the fhir
system user or group can write into the following folder
/opt/fhir/log
Add certificates and keys
docker-compose.yml
fileL39: - app_client_certificate_private_key.pem.password
+...
+L54: DEV_DSF_FHIR_CLIENT_CERTIFICATE_PRIVATE_KEY_PASSWORD_FILE: /run/secrets/app_client_certificate_private_key.pem.password
+...
+L141: app_client_certificate_private_key.pem.password:
+L142: file: ./secrets/client_certificate_private_key.pem.password
How to chmod / chown
For the example ssl_certificate_file.pem (chmod: 440, chown: fhir:docker) you must:
chmod 440 /opt/fhir/secrets/ssl_certificate_file.pem
fhir
and the group the file belongs to to docker
:chown fhir:docker /opt/fhir/secrets/ssl_certificate_file.pem
Uncomment one of the certificate chain entries in the docker-compose file based on the certificate authority that signed your DSF FHIR server certificate (certificate A). For example, use the following two lines if the server certificate is signed by DFN-Verein Global Issuing CA
L114: ssl_certificate_chain_file.pem:
+L115: file: ./secrets/ssl_certificate_chain_file_DFN-Verein.pem
Modify database passwords
Modify the docker-compose.yml file and set environment variables to the appropriate values
foo.bar.de
-> foo.bar.de:443
DEV_DSF_FHIR_SERVER_ORGANIZATION_IDENTIFIER_VALUE: todo.organization.com
Set your Organizations DSF identifier, aka the shortest FQDN that resolves to the main homepage of the organization, e.g. hs-heilbronn.de
DEV_DSF_FHIR_SERVER_BASE_URL: https://dsf.todo.organization.com/fhir
Set your FHIR servers external FQDN, e.g. foo.bar.de
-> https://foo.bar.de/fhir
DEV_DSF_FHIR_SERVER_ORGANIZATION_THUMBPRINT: f4344032fe77bffb912ff5abfd44da89fe64d355affb8d0f14c9ecb8bdbf92c7fe5f995b1ec0c453e4228b395e331052e4639044df4933d57721de508a84d26f
Set the SHA-512 Hash (lowercase hex) of your client certificate (Certificate B)
Use certtool --fingerprint --hash=sha512 --infile=client_certificate.pem
to generate the hash.
DEV_DSF_FHIR_SERVER_ROLECONFIG: |
(Optional) You can add other client certificates (e.g. personal certificates from admins) to your DSF instance. For additional information, see the FHIR server Access Control page.
For additional environment variables, see FHIR server Configuration Parameters page.
Start the DSF FHIR Server
Start using: docker compose up -d && docker compose logs -f
(Ctrl-C will close log, but not stop container)
Add Group/User
Add group and user used by the DSF BPE java application. Ubuntu compatible commands below:
sudo groupadd --gid 2202 bpe
+sudo useradd --system --no-create-home --uid 2202 --gid 2202 bpe
Download and Extract Config Files
Download and extract prepared DSF BPE server config files and folder structure:
cd /opt
+wget https://dsf.dev/download/dsf_bpe_1_3_1.tar.gz
+sudo tar --same-owner -zxvf dsf_bpe_1_3_1.tar.gz
The tar
command will unpack the config files at /opt/bpe
assuming you changed into the /opt
directory.
Verify that the bpe
system user or group can write into the following folders
/opt/bpe/log
Add certificates and keys
docker-compose.yml
fileL13: - app_client_certificate_private_key.pem.password
+...
+L35: DEV_DSF_BPE_FHIR_CLIENT_CERTIFICATE_PRIVATE_KEY_PASSWORD_FILE: /run/secrets/app_client_certificate_private_key.pem.password
+...
+L89: app_client_certificate_private_key.pem.password:
+L90: file: ./secrets/client_certificate_private_key.pem.password
Modify database passwords
Modify the docker-compose.yml file and set environment variables to the appropriate values
DEV_DSF_BPE_FHIR_SERVER_BASE_URL: https://dsf.todo.organization.com/fhir
Set your FHIR servers external FQDN, e.g. foo.bar.de
-> https://foo.bar.de/fhir
For additional environment variables, see the BPE server Configuration Parameters page.
Start the DSF BPE Server (without process plugins)
Start using: docker compose up -d && docker compose logs -f
(Ctrl-C will close log, but not stop container)
Verify DSF BPE Startup
If you need to debug the TLS connection to your DSF FHIR server use for example:docker run -it --rm alpine/openssl s_client your-fhir-server.fqdn:443
The command above should print the server certificate of your DSF FHIR server (certificate A) and end with a message like [...]tlsv13 alert certificate required[...]
By default, we will log both to the console (collected by docker) and to files in the log directory, so you can use docker compose logs -f
in /opt/bpe
and /opt/fhir
to view informational, warning and error logs. If you encounter any error and the reported information is not detailed enough, you can also check the logs in the /opt/fhir/log
and /opt/bpe/log
directories with debugging logs. There, you will also find older log files. If you have any questions and can't resolve them by yourself please always include the latest logs from /opt/fhir/log
and /opt/bpe/log
in your support request.
On a successful BPE start, you should see the following entries in your BPE log:
INFO Grizzly(1) - ClientEndpoint.onOpen(37) | Websocket connected {uri: wss://FHIR_SERVER_FQDN/fhir/ws, session-id: SOME_RANDOM_UUID1}
+INFO Grizzly(1) - ClientEndpoint.onOpen(37) | Websocket connected {uri: wss://FHIR_SERVER_FQDN/fhir/ws, session-id: SOME_RANDOM_UUID2}
Please visit the onboarding website of your network for more information.
Ideas for improvement?
Have you found an error or is something unclear to you? Then please feel free to write to us at gth-gecko@hs-heilbronn.de. Thank you very much!
In the following installation manual we will show you how you can install your own DSF instance to be part of an already existing DSF network.
\\nUnified installation manual
\\nThe installation instructions of DSF 0.9.x for different application use cases (e.g. NUM CODEX or HiGHmed) are now combined under one common manual. The specific steps for process installation and configuration are documented at the process plugin documentation pages.
\\nMember of existing networks
If you are part of an existing network (e.g. the German MII), please install the appropriate DSF version. For the production environment of MII and NUM this is currently DSF 0.9.x.
In the following installation manual we will show you how you can install your own DSF instance to be part of an already existing DSF network.
Unified installation manual
The installation instructions of DSF 0.9.x for different application use cases (e.g. NUM CODEX or HiGHmed) are now combined under one common manual. The specific steps for process installation and configuration are documented at the process plugin documentation pages.
Both VMs need latest docker (>= 24.0.0) and docker compose. For the latest install guide see https://docs.docker.com/engine/install.
sudo apt-get update
+sudo apt-get install apt-transport-https ca-certificates curl gnupg lsb-release
+curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
+echo "deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
+sudo apt-get update
+sudo apt-get install docker-ce docker-ce-cli containerd.io docker-compose-plugin
+sudo systemctl enable docker.service
+sudo systemctl enable containerd.service
The current version of docker compose is installed with the current docker version.
Two Certificates from the GÉANT TCS (via DFN e.V.), D-Trust (via TMF e.V.) or DFN-PKI Global G2 (legacy, no new certificates are issued) are needed:
TLS Web Server Authentication
(DFN PKI Profile: 'Web Server', Common-Name: Your external DSF FHIR Servers FQDN)TLS Web Client Authentication
(DFN PKI Profile: '802.1X Client', Common-Name: Your DSF BPE Servers FQDN)If you use GÉANT TCS certificates, then they are configured by default with the necessary X509v3 Extended Key Usages: TLS Web Server Authentication, TLS Web Client Authentication
.
Here is a quick overview of the expected network setup.
Source | Target | Port | Protocol |
---|---|---|---|
DSF BPE (local) | DSF FHIR (local) | 443 | https, wss |
DSF BPE (local) | DSF FHIR (other DSF communication partners) | 443 | https |
DSF FHIR (local) | DSF FHIR (other DSF communication partners) | 443 | https (HTTP HEAD only) |
DSF BPE (other DSF communication partners) | DSF FHIR (local) | 443 | https |
DSF FHIR (other DSF communication partners) | DSF FHIR (local) | 443 | https (HTTP HEAD only) |
Connections to services that are used by process plugins (e.g. a fTTP, a terminology server, simplifier.net or a local FHIR server) are not listed. Please refer to the respective process plugin documentation pages for more information.
Server Certificate (certificate A)
This certificate will be used as the DSF FHIR servers server certificate (ssl_certificate_file.pem, ssl_certificate_key_file.pem)
ssl_certificate_file.pem
ssl_certificate_key_file.pem
Client Certificate (Certificate B)
This certificate will be used as the DSF BPE servers client certificate (client_certificate.pem, client_certificate_private_key.pem) as well as the DSF FHIR servers client certificate (client_certificate.pem, client_certificate_private_key.pem)
client_certificate.pem
client_certificate_private_key.pem
Add Group/User
Add group and user used by the DSF FHIR java application. Ubuntu compatible commands below:
sudo addgroup --gid 2101 fhir
+sudo adduser --system --no-create-home --uid 2101 --gid 2101 fhir
Download and Extract Config Files
Download and unpack the prepared DSF FHIR server config files and folder structure:
cd /opt
+wget https://dsf.dev/download/dsf_fhir_1_1_0.tar.gz
+sudo tar --same-owner -zxvf dsf_fhir_1_1_0.tar.gz
The tar
command will unpack the config files at /opt/fhir
assuming you changed into the /opt
directory.
Verify that the fhir
system user or group can write into the following folder
/opt/fhir/log
Add certificates and keys
docker-compose.yml
fileL44: - app_client_certificate_private_key.pem.password
+...
+L59: DEV_DSF_FHIR_CLIENT_CERTIFICATE_PRIVATE_KEY_PASSWORD_FILE: /run/secrets/app_client_certificate_private_key.pem.password
+...
+L146: app_client_certificate_private_key.pem.password:
+L147: file: ./secrets/client_certificate_private_key.pem.password
How to chmod / chown
For the example ssl_certificate_file.pem (chmod: 440, chown: fhir:docker) you must:
chmod 440 /opt/fhir/secrets/ssl_certificate_file.pem
fhir
and the group the file belongs to to docker
:chown fhir:docker /opt/fhir/secrets/ssl_certificate_file.pem
Uncomment one of the certificate chain entries in the docker-compose file based on the certificate authority that signed your DSF FHIR server certificate (certificate A). For example, use the following two lines if the server certificate is signed by DFN-Verein Global Issuing CA
L114: ssl_certificate_chain_file.pem:
+L115: file: ./secrets/ssl_certificate_chain_file_DFN-Verein.pem
Modify database passwords
Modify the docker-compose.yml file and set environment variables to the appropriate values
foo.bar.de
-> foo.bar.de:443
DEV_DSF_FHIR_SERVER_ORGANIZATION_IDENTIFIER_VALUE: todo.organization.com
Set your Organizations DSF identifier, aka the shortest FQDN that resolves to the main homepage of the organization, e.g. hs-heilbronn.de
DEV_DSF_FHIR_SERVER_BASE_URL: https://dsf.todo.organization.com/fhir
Set your FHIR servers external FQDN, e.g. foo.bar.de
-> https://foo.bar.de/fhir
DEV_DSF_FHIR_SERVER_ORGANIZATION_THUMBPRINT: f4344032fe77bffb912ff5abfd44da89fe64d355affb8d0f14c9ecb8bdbf92c7fe5f995b1ec0c453e4228b395e331052e4639044df4933d57721de508a84d26f
Set the SHA-512 Hash (lowercase hex) of your client certificate (Certificate B)
Use certtool --fingerprint --hash=sha512 --infile=client_certificate.pem
to generate the hash.
DEV_DSF_FHIR_SERVER_ROLECONFIG: |
You can add other client certificates (e.g. personal certificates from admins) to your DSF instance. For additional information, see the FHIR server Access Control page.
For additional environment variables, see FHIR server Configuration Parameters page.
Start the DSF FHIR Server
Start using: docker compose up -d && docker compose logs -f
(Ctrl-C will close log, but not stop container)
Add Group/User
Add group and user used by the DSF BPE java application. Ubuntu compatible commands below:
sudo addgroup --gid 2202 bpe
+sudo adduser --system --no-create-home --uid 2202 --gid 2202 bpe
Download and Extract Config Files
Download and extract prepared DSF BPE server config files and folder structure:
cd /opt
+wget https://dsf.dev/download/dsf_bpe_1_1_0.tar.gz
+sudo tar --same-owner -zxvf dsf_bpe_1_1_0.tar.gz
The tar
command will unpack the config files at /opt/bpe
assuming you changed into the /opt
directory.
Verify that the bpe
system user or group can write into the following folders
/opt/bpe/log
Add certificates and keys
docker-compose.yml
fileL18: - app_client_certificate_private_key.pem.password
+...
+L40: DEV_DSF_BPE_FHIR_CLIENT_CERTIFICATE_PRIVATE_KEY_PASSWORD_FILE: /run/secrets/app_client_certificate_private_key.pem.password
+...
+L97: app_client_certificate_private_key.pem.password:
+L98: file: ./secrets/client_certificate_private_key.pem.password
Modify database passwords
Modify the docker-compose.yml file and set environment variables to the appropriate values
DEV_DSF_BPE_FHIR_SERVER_ORGANIZATION_IDENTIFIER_VALUE: todo.organization.com
Set your Organization's DSF identifier, aka the shortest FQDN that resolves to the main homepage of the organization, e.g. hs-heilbronn.de
DEV_DSF_BPE_FHIR_SERVER_BASE_URL: https://dsf.todo.organization.com/fhir
Set your FHIR servers external FQDN, e.g. foo.bar.de
-> https://foo.bar.de/fhir
For additional environment variables, see the BPE server Configuration Parameters page.
Start the DSF BPE Server (without process plugins)
Start using: docker compose up -d && docker compose logs -f
(Ctrl-C will close log, but not stop container)
Verify DSF BPE Startup
If you need to debug the TLS connection to your DSF FHIR server use for example:docker run -it --rm alpine/openssl s_client your-fhir-server.fqdn:443
The command above should print the server certificate of your DSF FHIR server (certificate A) and end with a message like [...]tlsv13 alert certificate required[...]
By default, we will log both to the console (collected by docker) and to files in the log directory, so you can use docker compose logs -f
in /opt/bpe
and /opt/fhir
to view informational, warning and error logs. If you encounter any error and the reported information is not detailed enough, you can also check the logs in the /opt/fhir/log
and /opt/bpe/log
directories with debugging logs. There, you will also find older log files. If you have any questions and can't resolve them by yourself please always include the latest logs from /opt/fhir/log
and /opt/bpe/log
in your support request.
On a successful BPE start, you should see the following entries in your BPE log:
INFO Grizzly(1) - ClientEndpoint.onOpen(37) | Websocket connected {uri: wss://FHIR_SERVER_FQDN/fhir/ws, session-id: SOME_RANDOM_UUID1}
+INFO Grizzly(1) - ClientEndpoint.onOpen(37) | Websocket connected {uri: wss://FHIR_SERVER_FQDN/fhir/ws, session-id: SOME_RANDOM_UUID2}
Please visit the onboarding website of your network for more information.
Ideas for improvement?
Have you found an error or is something unclear to you? Then please feel free to write to us at gth-gecko@hs-heilbronn.de. Thank you very much!
Member of existing networks
\\nIf you are part of an existing network (e.g. the German MII), please install the appropriate DSF version. For the production environment of MII and NUM this is currently DSF 0.9.x.
\\nMember of existing networks
If you are part of an existing network (e.g. the German MII), please install the appropriate DSF version. For the production environment of MII and NUM this is currently DSF 0.9.x.
In the following installation manual we will show you how you can install your own DSF instance to be part of an already existing DSF network.
Unified installation manual
The installation instructions of DSF 0.9.x for different application use cases (e.g. NUM CODEX or HiGHmed) are now combined under one common manual. The specific steps for process installation and configuration are documented at the process plugin documentation pages.
Both VMs need latest docker (>= 24.0.0) and docker compose. For the latest install guide see https://docs.docker.com/engine/install.
sudo apt-get update
+sudo apt-get install apt-transport-https ca-certificates curl gnupg lsb-release
+curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
+echo "deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
+sudo apt-get update
+sudo apt-get install docker-ce docker-ce-cli containerd.io docker-compose-plugin
+sudo systemctl enable docker.service
+sudo systemctl enable containerd.service
The current version of docker compose is installed with the current docker version.
Two Certificates from the GÉANT TCS (via DFN e.V.), D-Trust (via TMF e.V.) or DFN-PKI Global G2 (legacy, no new certificates are issued) are needed:
TLS Web Server Authentication
(DFN PKI Profile: 'Web Server', Common-Name: Your external DSF FHIR Servers FQDN)TLS Web Client Authentication
(DFN PKI Profile: '802.1X Client', Common-Name: Your DSF BPE Servers FQDN)If you use GÉANT TCS certificates, then they are configured by default with the necessary X509v3 Extended Key Usages: TLS Web Server Authentication, TLS Web Client Authentication
.
Here is a quick overview of the expected network setup.
Source | Target | Port | Protocol |
---|---|---|---|
DSF BPE (local) | DSF FHIR (local) | 443 | https, wss |
DSF BPE (local) | DSF FHIR (other DSF communication partners) | 443 | https |
DSF FHIR (local) | DSF FHIR (other DSF communication partners) | 443 | https (HTTP HEAD only) |
DSF BPE (other DSF communication partners) | DSF FHIR (local) | 443 | https |
DSF FHIR (other DSF communication partners) | DSF FHIR (local) | 443 | https (HTTP HEAD only) |
Connections to services that are used by process plugins (e.g. a fTTP, a terminology server, simplifier.net or a local FHIR server) are not listed. Please refer to the respective process plugin documentation pages for more information.
Server Certificate (certificate A)
This certificate will be used as the DSF FHIR servers server certificate (ssl_certificate_file.pem, ssl_certificate_key_file.pem)
ssl_certificate_file.pem
ssl_certificate_key_file.pem
Client Certificate (Certificate B)
This certificate will be used as the DSF BPE servers client certificate (client_certificate.pem, client_certificate_private_key.pem) as well as the DSF FHIR servers client certificate (client_certificate.pem, client_certificate_private_key.pem)
client_certificate.pem
client_certificate_private_key.pem
Add Group/User
Add group and user used by the DSF FHIR java application. Ubuntu compatible commands below:
sudo addgroup --gid 2101 fhir
+sudo adduser --system --no-create-home --uid 2101 --gid 2101 fhir
Download and Extract Config Files
Download and unpack the prepared DSF FHIR server config files and folder structure:
cd /opt
+wget https://dsf.dev/download/dsf_fhir_1_0_0.tar.gz
+sudo tar --same-owner -zxvf dsf_fhir_1_0_0.tar.gz
The tar
command will unpack the config files at /opt/fhir
assuming you changed into the /opt
directory.
Verify that the fhir
system user or group can write into the following folder
/opt/fhir/log
Add certificates and keys
docker-compose.yml
fileL44: - app_client_certificate_private_key.pem.password
+...
+L59: DEV_DSF_FHIR_CLIENT_CERTIFICATE_PRIVATE_KEY_PASSWORD_FILE: /run/secrets/app_client_certificate_private_key.pem.password
+...
+L149: app_client_certificate_private_key.pem.password:
+L150: file: ./secrets/client_certificate_private_key.pem.password
How to chmod / chown
For the example ssl_certificate_file.pem (chmod: 440, chown: fhir:docker) you must:
chmod 440 /opt/fhir/secrets/ssl_certificate_file.pem
fhir
and the group the file belongs to to docker
:chown fhir:docker /opt/fhir/secrets/ssl_certificate_file.pem
Uncomment one of the certificate chain entries in the docker-compose file based on the certificate authority that signed your DSF FHIR server certificate (certificate A). For example, use the following two lines if the server certificate is signed by DFN-Verein Global Issuing CA
L114: ssl_certificate_chain_file.pem:
+L115: file: ./secrets/ssl_certificate_chain_file_DFN-Verein.pem
Modify database passwords
Modify the docker-compose.yml file and set environment variables to the appropriate values
foo.bar.de
-> foo.bar.de:443
foo.bar.de
-> https://foo.bar.de/fhir
hs-heilbronn.de
certtool --fingerprint --hash=sha512 --infile=client_certificate.pem
to generate the hash.Start the DSF FHIR Server
Start using: docker compose up -d && docker compose logs -f
(Ctrl-C will close log, but not stop container)
Add Group/User
Add group and user used by the DSF BPE java application. Ubuntu compatible commands below:
sudo addgroup --gid 2202 bpe
+sudo adduser --system --no-create-home --uid 2202 --gid 2202 bpe
Download and Extract Config Files
Download and extract prepared DSF BPE server config files and folder structure:
cd /opt
+wget https://dsf.dev/download/dsf_bpe_1_0_0.tar.gz
+sudo tar --same-owner -zxvf dsf_bpe_1_0_0.tar.gz
The tar
command will unpack the config files at /opt/bpe
assuming you changed into the /opt
directory.
Verify that the bpe
system user or group can write into the following folders
/opt/bpe/log
Add certificates and keys
docker-compose.yml
fileL18: - app_client_certificate_private_key.pem.password
+...
+L40: DEV_DSF_BPE_FHIR_CLIENT_CERTIFICATE_PRIVATE_KEY_PASSWORD_FILE: /run/secrets/app_client_certificate_private_key.pem.password
+...
+L89: app_client_certificate_private_key.pem.password:
+L90: file: ./secrets/client_certificate_private_key.pem.password
Modify database passwords
Modify the docker-compose.yml file and set environment variables to the appropriate values
hs-heilbronn.de
foo.bar.de
-> https://foo.bar.de/fhir
Start the DSF BPE Server (without process plugins)
Start using: docker compose up -d && docker compose logs -f
(Ctrl-C will close log, but not stop container)
Verify DSF BPE Startup
If you need to debug the TLS connection to your DSF FHIR server use for example:docker run -it --rm alpine/openssl s_client your-fhir-server.fqdn:443
The command above should print the server certificate of your DSF FHIR server (certificate A) and end with a message like [...]tlsv13 alert certificate required[...]
By default, we will log both to the console (collected by docker) and to files in the log directory, so you can use docker compose logs -f
in /opt/bpe
and /opt/fhir
to view informational, warning and error logs. If you encounter any error and the reported information is not detailed enough, you can also check the logs in the /opt/fhir/log
and /opt/bpe/log
directories with debugging logs. There, you will also find older log files. If you have any questions and can't resolve them by yourself please always include the latest logs from /opt/fhir/log
and /opt/bpe/log
in your support request.
On a successful BPE start, you should see the following entries in your BPE log:
INFO Grizzly(1) - ClientEndpoint.onOpen(37) | Websocket connected {uri: wss://FHIR_SERVER_FQDN/fhir/ws, session-id: SOME_RANDOM_UUID1}
+INFO Grizzly(1) - ClientEndpoint.onOpen(37) | Websocket connected {uri: wss://FHIR_SERVER_FQDN/fhir/ws, session-id: SOME_RANDOM_UUID2}
Please visit the onboarding website of your network for more information.
Ideas for improvement?
Have you found an error or is something unclear to you? Then please feel free to write to us at gth-gecko@hs-heilbronn.de. Thank you very much!
Member of existing networks
\\nIf you are part of an existing network (e.g. the German MII), please install the appropriate DSF version. For the production environment of MII and NUM this is currently DSF 0.9.x.
\\n<code_scheme name="highmed_dsf" version="173">
+ <option name="FORMATTER_TAGS_ENABLED" value="true" />
+ <JavaCodeStyleSettings>
+ <option name="ANNOTATION_PARAMETER_WRAP" value="1" />
+ <option name="CLASS_COUNT_TO_USE_IMPORT_ON_DEMAND" value="99" />
+ <option name="NAMES_COUNT_TO_USE_IMPORT_ON_DEMAND" value="99" />
+ <option name="PACKAGES_TO_USE_IMPORT_ON_DEMAND">
+ <value />
+ </option>
+ <option name="IMPORT_LAYOUT_TABLE">
+ <value>
+ <package name="" withSubpackages="true" static="true" />
+ <emptyLine />
+ <package name="java" withSubpackages="true" static="false" />
+ <emptyLine />
+ <package name="javax" withSubpackages="true" static="false" />
+ <emptyLine />
+ <package name="org" withSubpackages="true" static="false" />
+ <emptyLine />
+ <package name="com" withSubpackages="true" static="false" />
+ <emptyLine />
+ <package name="ca" withSubpackages="true" static="false" />
+ <emptyLine />
+ <package name="de" withSubpackages="true" static="false" />
+ <emptyLine />
+ <package name="" withSubpackages="true" static="false" />
+ <emptyLine />
+ </value>
+ </option>
+ </JavaCodeStyleSettings>
+ <codeStyleSettings language="JAVA">
+ <option name="KEEP_LINE_BREAKS" value="false" />
+ <option name="KEEP_FIRST_COLUMN_COMMENT" value="false" />
+ <option name="KEEP_CONTROL_STATEMENT_IN_ONE_LINE" value="false" />
+ <option name="KEEP_BLANK_LINES_IN_DECLARATIONS" value="1" />
+ <option name="KEEP_BLANK_LINES_IN_CODE" value="1" />
+ <option name="KEEP_BLANK_LINES_BEFORE_RBRACE" value="1" />
+ <option name="BRACE_STYLE" value="2" />
+ <option name="CLASS_BRACE_STYLE" value="2" />
+ <option name="METHOD_BRACE_STYLE" value="2" />
+ <option name="ELSE_ON_NEW_LINE" value="true" />
+ <option name="WHILE_ON_NEW_LINE" value="true" />
+ <option name="CATCH_ON_NEW_LINE" value="true" />
+ <option name="FINALLY_ON_NEW_LINE" value="true" />
+ <option name="ALIGN_MULTILINE_PARAMETERS" value="false" />
+ <option name="ALIGN_MULTILINE_RESOURCES" value="false" />
+ <option name="SPACE_WITHIN_ARRAY_INITIALIZER_BRACES" value="true" />
+ <option name="SPACE_BEFORE_ARRAY_INITIALIZER_LBRACE" value="true" />
+ <option name="CALL_PARAMETERS_WRAP" value="1" />
+ <option name="METHOD_PARAMETERS_WRAP" value="1" />
+ <option name="RESOURCE_LIST_WRAP" value="5" />
+ <option name="EXTENDS_LIST_WRAP" value="1" />
+ <option name="THROWS_LIST_WRAP" value="1" />
+ <option name="EXTENDS_KEYWORD_WRAP" value="1" />
+ <option name="THROWS_KEYWORD_WRAP" value="1" />
+ <option name="METHOD_CALL_CHAIN_WRAP" value="1" />
+ <option name="BINARY_OPERATION_WRAP" value="1" />
+ <option name="BINARY_OPERATION_SIGN_ON_NEXT_LINE" value="true" />
+ <option name="TERNARY_OPERATION_WRAP" value="5" />
+ <option name="ARRAY_INITIALIZER_WRAP" value="1" />
+ <option name="PARAMETER_ANNOTATION_WRAP" value="2" />
+ <option name="VARIABLE_ANNOTATION_WRAP" value="2" />
+ <indentOptions>
+ <option name="USE_TAB_CHARACTER" value="true" />
+ </indentOptions>
+ </codeStyleSettings>
+</code_scheme>
<code_scheme name=\\"highmed_dsf\\" version=\\"173\\">\\n <option name=\\"FORMATTER_TAGS_ENABLED\\" value=\\"true\\" />\\n <JavaCodeStyleSettings>\\n <option name=\\"ANNOTATION_PARAMETER_WRAP\\" value=\\"1\\" />\\n <option name=\\"CLASS_COUNT_TO_USE_IMPORT_ON_DEMAND\\" value=\\"99\\" />\\n <option name=\\"NAMES_COUNT_TO_USE_IMPORT_ON_DEMAND\\" value=\\"99\\" />\\n <option name=\\"PACKAGES_TO_USE_IMPORT_ON_DEMAND\\">\\n <value />\\n </option>\\n <option name=\\"IMPORT_LAYOUT_TABLE\\">\\n <value>\\n <package name=\\"\\" withSubpackages=\\"true\\" static=\\"true\\" />\\n <emptyLine />\\n <package name=\\"java\\" withSubpackages=\\"true\\" static=\\"false\\" />\\n <emptyLine />\\n <package name=\\"javax\\" withSubpackages=\\"true\\" static=\\"false\\" />\\n <emptyLine />\\n <package name=\\"org\\" withSubpackages=\\"true\\" static=\\"false\\" />\\n <emptyLine />\\n <package name=\\"com\\" withSubpackages=\\"true\\" static=\\"false\\" />\\n <emptyLine />\\n <package name=\\"ca\\" withSubpackages=\\"true\\" static=\\"false\\" />\\n <emptyLine />\\n <package name=\\"de\\" withSubpackages=\\"true\\" static=\\"false\\" />\\n <emptyLine />\\n <package name=\\"\\" withSubpackages=\\"true\\" static=\\"false\\" />\\n <emptyLine />\\n </value>\\n </option>\\n </JavaCodeStyleSettings>\\n <codeStyleSettings language=\\"JAVA\\">\\n <option name=\\"KEEP_LINE_BREAKS\\" value=\\"false\\" />\\n <option name=\\"KEEP_FIRST_COLUMN_COMMENT\\" value=\\"false\\" />\\n <option name=\\"KEEP_CONTROL_STATEMENT_IN_ONE_LINE\\" value=\\"false\\" />\\n <option name=\\"KEEP_BLANK_LINES_IN_DECLARATIONS\\" value=\\"1\\" />\\n <option name=\\"KEEP_BLANK_LINES_IN_CODE\\" value=\\"1\\" />\\n <option name=\\"KEEP_BLANK_LINES_BEFORE_RBRACE\\" value=\\"1\\" />\\n <option name=\\"BRACE_STYLE\\" value=\\"2\\" />\\n <option name=\\"CLASS_BRACE_STYLE\\" value=\\"2\\" />\\n <option name=\\"METHOD_BRACE_STYLE\\" value=\\"2\\" />\\n <option name=\\"ELSE_ON_NEW_LINE\\" value=\\"true\\" />\\n <option 
name=\\"WHILE_ON_NEW_LINE\\" value=\\"true\\" />\\n <option name=\\"CATCH_ON_NEW_LINE\\" value=\\"true\\" />\\n <option name=\\"FINALLY_ON_NEW_LINE\\" value=\\"true\\" />\\n <option name=\\"ALIGN_MULTILINE_PARAMETERS\\" value=\\"false\\" />\\n <option name=\\"ALIGN_MULTILINE_RESOURCES\\" value=\\"false\\" />\\n <option name=\\"SPACE_WITHIN_ARRAY_INITIALIZER_BRACES\\" value=\\"true\\" />\\n <option name=\\"SPACE_BEFORE_ARRAY_INITIALIZER_LBRACE\\" value=\\"true\\" />\\n <option name=\\"CALL_PARAMETERS_WRAP\\" value=\\"1\\" />\\n <option name=\\"METHOD_PARAMETERS_WRAP\\" value=\\"1\\" />\\n <option name=\\"RESOURCE_LIST_WRAP\\" value=\\"5\\" />\\n <option name=\\"EXTENDS_LIST_WRAP\\" value=\\"1\\" />\\n <option name=\\"THROWS_LIST_WRAP\\" value=\\"1\\" />\\n <option name=\\"EXTENDS_KEYWORD_WRAP\\" value=\\"1\\" />\\n <option name=\\"THROWS_KEYWORD_WRAP\\" value=\\"1\\" />\\n <option name=\\"METHOD_CALL_CHAIN_WRAP\\" value=\\"1\\" />\\n <option name=\\"BINARY_OPERATION_WRAP\\" value=\\"1\\" />\\n <option name=\\"BINARY_OPERATION_SIGN_ON_NEXT_LINE\\" value=\\"true\\" />\\n <option name=\\"TERNARY_OPERATION_WRAP\\" value=\\"5\\" />\\n <option name=\\"ARRAY_INITIALIZER_WRAP\\" value=\\"1\\" />\\n <option name=\\"PARAMETER_ANNOTATION_WRAP\\" value=\\"2\\" />\\n <option name=\\"VARIABLE_ANNOTATION_WRAP\\" value=\\"2\\" />\\n <indentOptions>\\n <option name=\\"USE_TAB_CHARACTER\\" value=\\"true\\" />\\n </indentOptions>\\n </codeStyleSettings>\\n</code_scheme>
\\nThe German Federal Ministry of Education and Research is funding the Medical Informatics Initiative with the aim of making routine data available digitally, reliably and quickly for medical research. University hospitals have founded consortia with partners such as research institutions and other companies to create the conditions for research and patient care to share their data across sites. Data Integration Centers (DIC) have been established at the university hospitals and partner institutions to create the technical and organizational conditions for data exchange between patient care and medical research.
The Data Sharing Framework (DSF) was developed within the HiGHmed consortium of the Medical Informatics Initiative and is now funded as an independent project by the German Federal Ministry of Education and Research within the Medical Informatics structure as DSF Community.
The DSF is a concept for a secure middleware to distribute data sharing processes, aiming to extract, merge, pseudonymize and provide data stored in multiple distributed DICs. A researcher can use the DSF for example to submit feasibility queries to several DICs and thus has the opportunity to obtain sufficient data from different locations for his or her research. By using international standards such as FHIR and BPMN 2.0, the problem of heterogeneous data models, primary systems, architectures and federated legislation between DICs can be solved. The aim is to enable secure and syntactically-, semantically- and process-interoperable data exchange across organisational boundaries.
The DSF is a secure communication infrastructure that (1) scales with communication relationships, (2) orchestrates processes and instances, (3) separates execution logic from program code, (4) enables automated and user-centric process steps, (5) and can be used for heterogeneous structures.
',7)]))}const u=i(c,[["render",d],["__file","introduction.html.vue"]]),g=JSON.parse('{"path":"/intro/info/introduction.html","title":"Introduction","lang":"en-US","frontmatter":{"title":"Introduction","icon":"customize","gitInclude":[]},"headers":[],"readingTime":{"minutes":1.05,"words":316},"filePathRelative":"intro/info/introduction.md","excerpt":"The German Federal Ministry of Education and Research is funding the Medical Informatics Initiative with the aim of making routine data available digitally, reliably and quickly for medical research. University hospitals have founded consortia with partners such as research institutions and other companies to create the conditions for research and patient care to share their data across sites. Data Integration Centers (DIC) have been established at the university hospitals and partner institutions to create the technical and organizational conditions for data exchange between patient care and medical research.
"}');export{u as comp,g as data}; diff --git a/assets/introduction.html-KmTMU4Pu.js b/assets/introduction.html-KmTMU4Pu.js new file mode 100644 index 000000000..fdda5663e --- /dev/null +++ b/assets/introduction.html-KmTMU4Pu.js @@ -0,0 +1 @@ +import{_ as t}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as i,b as e,o as n}from"./app-BIWb5uIp.js";const a="/photos/guideline/introduction/dsf_architecture.svg",r={};function o(c,s){return n(),i("div",null,s[0]||(s[0]=[e("p",null,"The Data Sharing Framework implements a distributed process engine based on the BPMN 2.0 and FHIR R4 standards. The DSF is used to support biomedical research with routine data. Every participating site runs a FHIR endpoint (dsf-fhir) accessible by other sites and a business process engine (dsf-bpe) in the local secured network. Authentication between sites is handled using X.509 client/server certificates. The process engines execute BPMN processes in order to coordinate local and remote steps necessary to enable cross-site data sharing and feasibility analyses. This includes access to local data repositories, use-and-access-committee decision support, consent filtering, and privacy preserving record-linkage and pseudonymization.",-1),e("figure",null,[e("img",{src:a,alt:"DSF Architecture",tabindex:"0",loading:"lazy"}),e("figcaption",null,"DSF Architecture")],-1)]))}const p=t(r,[["render",o],["__file","introduction.html.vue"]]),u=JSON.parse('{"path":"/oldstable/introduction.html","title":"Introduction","lang":"en-US","frontmatter":{"title":"Introduction","icon":"info","gitInclude":[]},"headers":[],"readingTime":{"minutes":0.39,"words":116},"filePathRelative":"oldstable/introduction.md","excerpt":"The Data Sharing Framework implements a distributed process engine based on the BPMN 2.0 and FHIR R4 standards. The DSF is used to support biomedical research with routine data. 
Every participating site runs a FHIR endpoint (dsf-fhir) accessible by other sites and a business process engine (dsf-bpe) in the local secured network. Authentication between sites is handled using X.509 client/server certificates. The process engines execute BPMN processes in order to coordinate local and remote steps necessary to enable cross-site data sharing and feasibility analyses. This includes access to local data repositories, use-and-access-committee decision support, consent filtering, and privacy preserving record-linkage and pseudonymization.
"}');export{p as comp,u as data}; diff --git a/assets/learn.html-CN30f4q5.js b/assets/learn.html-CN30f4q5.js new file mode 100644 index 000000000..aab55d1cc --- /dev/null +++ b/assets/learn.html-CN30f4q5.js @@ -0,0 +1 @@ +import{_ as i}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as l,b as e,e as o,f as a,r as s,o as u,d as r}from"./app-BIWb5uIp.js";const m={},p={class:"hint-container warning"};function c(d,t){const n=s("RouteLink");return u(),l("div",null,[e("div",p,[t[3]||(t[3]=e("p",{class:"hint-container-title"},"Work in progress",-1)),t[4]||(t[4]=e("p",null,"This site is work in progress, please come back later.",-1)),t[5]||(t[5]=e("p",null,"If you want further information about the DSF and how to implement use cases, please visit:",-1)),e("ul",null,[e("li",null,[o(n,{to:"/intro/"},{default:a(()=>t[0]||(t[0]=[r("Documentation Overview")])),_:1})]),e("li",null,[o(n,{to:"/stable/"},{default:a(()=>t[1]||(t[1]=[r("Getting started")])),_:1})]),e("li",null,[o(n,{to:"/about/learnmore/contact.html"},{default:a(()=>t[2]||(t[2]=[r("Contact us")])),_:1})])])])])}const h=i(m,[["render",c],["__file","learn.html.vue"]]),v=JSON.parse('{"path":"/for-you/learn.html","title":"How to implement your use-case","lang":"en-US","frontmatter":{"title":"How to implement your use-case","icon":"creative","gitInclude":[]},"headers":[],"readingTime":{"minutes":0.16,"words":48},"filePathRelative":"for-you/learn.md","excerpt":"Work in progress
\\nThis site is work in progress, please come back later.
\\nIf you want further information about the DSF and how to implement use cases, please visit:
\\nThe Data Sharing Framework consists of two components: A FHIR Endpoint Server used to accept Task resources and provide resources for download by other organizations and a Business Process Engine Server run internally and not accessible by other organizations to execute and coordinate processes.
More information on Client- and Server-Certificates are available on the dedicated wiki page: Authentication: Client/Server Certificates
In some organizations an additional reverse proxy in an external DMZ is needed. This can be accomplished by using for example nginx or haproxy as a TCP-Proxy. Example configurations routing traffic using SNI can be found below. Configuring an additional reverse proxy to terminate the incoming TLS connection early is not recommended.
nginx
http {
+ # ...
+}
+
+stream {
+ map $ssl_preread_server_name $name {
+ fhir.example.com fhir;
+ }
+
+ upstream fhir {
+ server 192.168.0.1:443;
+ }
+
+ server {
+ listen 443;
+ proxy_pass $name;
+ ssl_preread on;
+ }
+}
haproxy
defaults
+ timeout connect 5s
+ timeout client 30s
+ timeout server 30s
+
+frontend ingress
+ bind :443
+ mode tcp
+
+ tcp-request inspect-delay 5s
+ tcp-request content accept if { req_ssl_hello_type 1 }
+ use_backend fhir if { req.ssl_sni fhir.example.com }
+
+backend fhir
+ mode tcp
+
+ server fhir 192.168.0.1:443
The Data Sharing Framework consists of two components: A FHIR Endpoint Server used to accept Task resources and provide resources for download by other organizations and a Business Process Engine Server run internally and not accessible by other organizations to execute and coordinate processes.
"}');export{m as comp,u as data}; diff --git a/assets/networkSetup.html-CAU1OxOw.js b/assets/networkSetup.html-CAU1OxOw.js new file mode 100644 index 000000000..68102cce6 --- /dev/null +++ b/assets/networkSetup.html-CAU1OxOw.js @@ -0,0 +1 @@ +import{_ as r,a as t}from"./highmed_dsf_network_setup_ext_dmz-DyQZOoIs.js";import{_ as o}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as i,a as n,o as a}from"./app-BIWb5uIp.js";const s={};function c(d,e){return a(),i("div",null,e[0]||(e[0]=[n('The Data Sharing Framework consists of two components: A FHIR Endpoint Server used to except Task resources and provide resources for download by other organizations and a Business Process Engine Server run internal and not accessible by other organization to execute and coordinate processes.
More information on Client- and Server-Certificates are available on the dedicated wiki page: Authentication: Client/Server Certificates
In some organizations an additional reverse proxy in an external DMZ is needed. This can be accomplished by using for example nginx or haproxy as a TCP-Proxy. Example configurations routing traffic using SNI can be found below. Configuring an additional reverse proxy to terminate the incoming TLS connection early is not recommended.
',7)]))}const m=o(s,[["render",c],["__file","networkSetup.html.vue"]]),g=JSON.parse('{"path":"/intro/info/networkSetup.html","title":"Network Setup and General Architecture","lang":"en-US","frontmatter":{"title":"Network Setup and General Architecture","icon":"customize","gitInclude":[]},"headers":[{"level":2,"title":"Additional Reverse Proxy in external DMZ","slug":"additional-reverse-proxy-in-external-dmz","link":"#additional-reverse-proxy-in-external-dmz","children":[]}],"readingTime":{"minutes":1.62,"words":486},"filePathRelative":"intro/info/networkSetup.md","excerpt":"The Data Sharing Framework consists of two components: A FHIR Endpoint Server used to except Task resources and provide resources for download by other organizations and a Business Process Engine Server run internal and not accessible by other organization to execute and coordinate processes.
"}');export{m as comp,g as data}; diff --git a/assets/num-codexInstall.html-GtOtu3Li.js b/assets/num-codexInstall.html-GtOtu3Li.js new file mode 100644 index 000000000..89d9da973 --- /dev/null +++ b/assets/num-codexInstall.html-GtOtu3Li.js @@ -0,0 +1,28 @@ +import{_ as i}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as r,a as s,o as t}from"./app-BIWb5uIp.js";const a={};function n(o,e){return t(),r("div",null,e[0]||(e[0]=[s(`This setup guide uses pre-build docker images for DSF Version 0.9.3. This guide is not suitable for HiGHmed organizations.
If you are a member of HiGHmed, see HiGHmed Install.
Both VMs need the latest docker and docker-compose. For the latest install guide see https://docs.docker.com/engine/install and https://docs.docker.com/compose/install
docker:
sudo apt-get update
+sudo apt-get install apt-transport-https ca-certificates curl gnupg lsb-release
+curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
+echo "deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
+sudo apt-get update
+sudo apt-get install docker-ce docker-ce-cli containerd.io
docker-compose (warning: 2.17.3 might not be latest):
sudo curl -L "https://github.com/docker/compose/releases/download/v2.17.3/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
+sudo chmod +x /usr/local/bin/docker-compose
Two Certificates from the DFN-PKI Global G2 (via DFN e.V.), GÉANT TCS (via DFN e.V.) or D-Trust (via TMF e.V.) are needed; for more information see Authentication
For additional information on the network setup see Network-and-Architecture.
Here is a quick overview of the expected network setup. Connections to the fTTP, the terminology server and simplifier.net for validating GECCO FHIR resources as well as the local GECCO FHIR server are not listed:
Source | Target | Port | Protocol |
---|---|---|---|
DSF BPE (local) | DSF FHIR (local) | 443 | https, wss |
DSF BPE (local) | DSF FHIR (GECCO Transfer Hub) | 443 | https |
DSF FHIR (local) | DSF FHIR (GECCO Transfer Hub) | 443 | https (HTTP HEAD only) |
DSF BPE (GECCO Transfer Hub) | DSF FHIR (local) | 443 | https |
DSF FHIR (GECCO Transfer Hub) | DSF FHIR (local) | 443 | https (HTTP HEAD only) |
You are required to fill out the on-boarding Excel spreadsheet, provided with the NUM-CODEX hackathon invite, and send it to the GECCO Transfer Hub. If the GECCO Transfer Hub already received and validated your On-Boarding Excel spreadsheet and you do not have to change any information, you can skip this step.
Server Certificate (certificate A)
This certificate will be used as the DSF FHIR servers server certificate (ssl_certificate_file.pem, ssl_certificate_key_file.pem)
ssl_certificate_file.pem
ssl_certificate_key_file.pem
Client Certificate (certificate B)
This certificate will be used as the DSF BPE servers client certificate (client_certificate.pem, client_certificate_private_key.pem) as well as the DSF FHIR servers client certificate (client_certificate.pem, client_certificate_private_key.pem)
client_certificate.pem
client_certificate_private_key.pem
Add Group/User
Add group and user used by the DSF FHIR java application. Ubuntu compatible commands below:
sudo addgroup --gid 2101 fhir
+sudo adduser --system --no-create-home --uid 2101 --gid 2101 fhir
Download and Extract Config Files
Download prepared DSF FHIR server config files and folder structure from
cd /opt
+wget https://github.com/highmed/highmed-dsf/wiki/resources/dsf_codex_test_fhir_0_9_3.tar.gz
+sudo tar --same-owner -zxvf dsf_codex_test_fhir_0_9_3.tar.gz
cd /opt
+wget https://github.com/highmed/highmed-dsf/wiki/resources/dsf_codex_prod_fhir_0_9_3.tar.gz
+sudo tar --same-owner -zxvf dsf_codex_prod_fhir_0_9_3.tar.gz
The tar
command will unpack the config files at /opt/fhir
assuming you changed into the /opt
directory.
Verify that the fhir
system user or group can write into the following folder
/opt/fhir/log
Add certificates and keys
docker-compose.yml
fileL39: - app_client_certificate_private_key.pem.password
+...
+L56: ORG_HIGHMED_DSF_FHIR_CLIENT_CERTIFICATE_PRIVATE_KEY_PASSWORD_FILE: /run/secrets/app_client_certificate_private_key.pem.password
+...
+L137: app_client_certificate_private_key.pem.password:
+L138: file: ./secrets/client_certificate_private_key.pem.password
Uncomment one of the certificate chain entries in the docker-compose file based on the certificate authority that signed your DSF FHIR server certificate (certificate A). For example, use the following two lines if the server certificate is signed by DFN-Verein Global Issuing CA
L102: ssl_certificate_chain_file.pem:
+L103: file: ./secrets/ssl_certificate_chain_file_DFN-Verein.pem
Modify database passwords
Modify the docker-compose.yml file and set environment variables to the appropriate values
foo.bar.de
-> foo.bar.de:443
foo.bar.de
foo.bar.de
-> https://foo.bar.de/fhir
certtool --fingerprint --hash=sha512 --infile=client_certificate.pem
to generate the hash.ab12...37ff,f3a2...bb22
. You can add additional client certificate thumbprints for example the thumbprint of your (the admins) personal DFN PKI S/MIME certificate, to access the DSF FHIR servers REST interface.ab12...37ff,f3a2...bb22
. Usually it is not necessary to add additional thumbprints other than your client certificate (certificate B) here. When a client uses a certificate with a thumbprint listed here, the client is allowed to permanently delete FHIR resources.Start the DSF FHIR Server
Start using: docker-compose up -d && docker-compose logs -f
(Ctrl-C will close log, but not stop container)
Add Group/User
Add group and user used by the DSF BPE java application. Ubuntu compatible commands below:
sudo addgroup --gid 2202 bpe
+sudo adduser --system --no-create-home --uid 2202 --gid 2202 bpe
Download and Extract Config Files
Download prepared DSF BPE server config files and folder structure from
cd /opt
+wget https://github.com/highmed/highmed-dsf/wiki/resources/dsf_codex_test_bpe_0_9_3.tar.gz
+sudo tar --same-owner -zxvf dsf_codex_test_bpe_0_9_3.tar.gz
cd /opt
+wget https://github.com/highmed/highmed-dsf/wiki/resources/dsf_codex_prod_bpe_0_9_3.tar.gz
+sudo tar --same-owner -zxvf dsf_codex_prod_bpe_0_9_3.tar.gz
The tar
command will unpack the config files at /opt/bpe
assuming you changed into the /opt
directory.
Verify that the bpe
system user or group can write into the following folders
/opt/bpe/log
Add certificates and keys
docker-compose.yml
fileL13: - app_client_certificate_private_key.pem.password
+...
+L38: ORG_HIGHMED_DSF_BPE_FHIR_CLIENT_CERTIFICATE_PRIVATE_KEY_PASSWORD_FILE: /run/secrets/app_client_certificate_private_key.pem.password
+...
+L92: app_client_certificate_private_key.pem.password:
+L93: file: ./secrets/client_certificate_private_key.pem.password
Modify database passwords
Modify the docker-compose.yml file and set environment variables to the appropriate values
foo.bar.de
-> https://foo.bar.de/fhir
Start the DSF BPE Server (without process plugins)
Start using: docker-compose up -d && docker-compose logs -f
(Ctrl-C will close log, but not stop container)
Verify DSF BPE Startup
If you need to debug the TLS connection to your DSF FHIR server use for example:docker run -it --rm alpine/openssl s_client your-fhir-server.fqdn:443
The command above should print the server certificate of your DSF FHIR server (certificate A) and end with a message like [...]tlsv13 alert certificate required[...]
Stop the DSF BPE Server
docker-compose stop
Add the following DSF BPE process plugins, for instructions on how to configure the plugin, see release notes.
Notice: Jar-files within the folders /opt/bpe/process
and /opt/bpe/plugin
need to be readable by the linux bpe
user -> chown root:bpe
, chmod 440
Start the DSF BPE Server (with process plugins)
Start using: docker-compose up -d && docker-compose logs -f
(Ctrl-C will close log, but not stop container)
This setup guide uses pre-built docker images for DSF Version 0.9.3. This guide is not suitable for HiGHmed organizations.
\\nIf you are a member of HiGHmed, see HiGHmed Install.
The Network University Medicine was established in April 2020 as part of the COVID-19 pandemic crisis management. The aim of the NUM is to better coordinate COVID-19 research at all 36 university hospitals in Germany.
As part of the CODEX | COVID-19 Data Exchange Platform project, a nationwide, uniform, privacy-compliant infrastructure for storing and providing COVID-19 research datasets was established. Since 2022, the work continues within the project NUM RDP. All 36 university hospitals have installed the DSF to share Covid-19 research data.
The Data Transfer Process is used in NUM CODEX to send data from a Data Integration Center (DIC), via the Gecco Transfer Hub (GTH), to the Central Research Repository (CRR). The infrastructure and communication messages on which the process is based can be seen in the following figure. All organizations use the Data Sharing Framework (DSF) for deployment and execution of the process.
More technical information can be found on GitHub.
',7)]))}const d=t(o,[["render",c],["__file","num.html.vue"]]),m=JSON.parse(`{"path":"/intro/use-cases/num.html","title":"Network University Medicine","lang":"en-US","frontmatter":{"title":"Network University Medicine","icon":"map","gitInclude":[]},"headers":[{"level":3,"title":"The Data Transfer Process","slug":"the-data-transfer-process","link":"#the-data-transfer-process","children":[]}],"readingTime":{"minutes":0.66,"words":199},"filePathRelative":"intro/use-cases/num.md","excerpt":"The Network University Medicine was established in April 2020 as part of the COVID-19 pandemic crisis management. The aim of the NUM is to better coordinate COVID-19 research at all 36 university hospitals in Germany.'
"}`);export{d as comp,m as data}; diff --git a/assets/oidc.html-BTDGYEv3.js b/assets/oidc.html-BTDGYEv3.js new file mode 100644 index 000000000..a666a0fd2 --- /dev/null +++ b/assets/oidc.html-BTDGYEv3.js @@ -0,0 +1,20 @@ +import{_ as t}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as l,a,b as e,d as s,e as r,f as o,r as h,o as d}from"./app-BIWb5uIp.js";const p={},c={class:"hint-container tip"};function k(u,i){const n=h("RouteLink");return d(),l("div",null,[i[6]||(i[6]=a('Access to the DSF BPE server user interface can be configured via access control roles. By default users are only authenticated using X.509 client certificates, but authentication for local users via OAuth 2.0 OpenID Connect can also be enabled.
The DSF BPE server supports Authorization Code Flow for the user interface. Back-Channel Logout is also supported.
',3)),e("div",c,[i[5]||(i[5]=e("p",{class:"hint-container-title"},"BPE Reverse Proxy",-1)),e("p",null,[i[1]||(i[1]=s("The DSF BPE reverse proxy requires client certificates by default. To use OpenID Connect authentication the configuration parameter ")),r(n,{to:"/v1.5.1/maintain/bpe/configuration/reverseproxy.html#ssl-verify-client"},{default:o(()=>i[0]||(i[0]=[s("SSL_VERIFY_CLIENT")])),_:1}),i[2]||(i[2]=s(" needs to be set to ")),i[3]||(i[3]=e("code",null,"optional",-1)),i[4]||(i[4]=s("."))])]),i[7]||(i[7]=a(`To enable authentication via OpenID Connect authorization code flow, set the configuration parameter DEV_DSF_SERVER_AUTH_OIDC_AUTHORIZATION_CODE_FLOW to true
and specify the following parameters:
Optionally, back channel logout can be enabled by setting DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT to true
. The DSF BPE server accepts logout tokens at DEV_DSF_BPE_SERVER_BASE_URL + /back-channel-logout
. The path can be modified via DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT_PATH.
A number of additional DEV_DSF_SERVER_AUTH_OIDC ...
configuration parameter are specify on the DSF BPE server configuration parameter page.
For example the configuration parameter DEV_DSF_SERVER_AUTH_OIDC_PROVIDER_CLIENT_TRUST_SERVER_CERTIFICATE_CAS can be used to specify a PEM encoded file with trusted root certificates to be used when accessing the OpenID Connect provider. If not specify the JVM default trusted root certificates are used for this connection.
services:
+ app:
+ image: ghcr.io/datasharingframework/bpe:1.5.1
+ # ...
+ secrets:
+ - keycloak_root_ca.pem
+ # ...
+ environment:
+ # ...
+ DEV_DSF_SERVER_AUTH_OIDC_AUTHORIZATION_CODE_FLOW: 'true'
+ DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT: 'true'
+ DEV_DSF_SERVER_AUTH_OIDC_BEARER_TOKEN: 'true'
+ DEV_DSF_SERVER_AUTH_OIDC_PROVIDER_REALM_BASE_URL: https://keycloak.test.org/realms/dsf
+ DEV_DSF_SERVER_AUTH_OIDC_PROVIDER_CLIENT_TRUST_SERVER_CERTIFICATE_CAS: /run/secrets/keycloak_root_ca.pem
+ DEV_DSF_SERVER_AUTH_OIDC_CLIENT_ID: dsf-bpe
+ DEV_DSF_SERVER_AUTH_OIDC_CLIENT_SECRET: n9bCMtjugv3Y_.szktXyQ2RH5se+J%o3
+ # ...
+secrets:
+ keycloak_root_ca.pem:
+ file: ./secrets/keycloak_root_ca.pem
Access to the DSF BPE server user interface can be configured via access control roles. By default users are only authenticated using X.509 client certificates, but authentication for local users via OAuth 2.0 OpenID Connect can also be enabled.
\\n"}');export{v as comp,f as data}; diff --git a/assets/oidc.html-Bgq1NlNy.js b/assets/oidc.html-Bgq1NlNy.js new file mode 100644 index 000000000..9083a431f --- /dev/null +++ b/assets/oidc.html-Bgq1NlNy.js @@ -0,0 +1,20 @@ +import{_ as t}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as l,a,b as e,d as s,e as r,f as o,r as h,o as d}from"./app-BIWb5uIp.js";const p={},c={class:"hint-container tip"};function k(u,i){const n=h("RouteLink");return d(),l("div",null,[i[6]||(i[6]=a('Access to the DSF BPE server user interface can be configured via access control roles. By default users are only authenticated using X.509 client certificates, but authentication for local users via OAuth 2.0 OpenID Connect can also be enabled.
The DSF BPE server supports Authorization Code Flow for the user interface. Back-Channel Logout is also supported.
',3)),e("div",c,[i[5]||(i[5]=e("p",{class:"hint-container-title"},"BPE Reverse Proxy",-1)),e("p",null,[i[1]||(i[1]=s("The DSF BPE reverse proxy requires client certificates by default. To use OpenID Connect authentication the configuration parameter ")),r(n,{to:"/v1.5.0/maintain/bpe/configuration/reverseproxy.html#ssl-verify-client"},{default:o(()=>i[0]||(i[0]=[s("SSL_VERIFY_CLIENT")])),_:1}),i[2]||(i[2]=s(" needs to be set to ")),i[3]||(i[3]=e("code",null,"optional",-1)),i[4]||(i[4]=s("."))])]),i[7]||(i[7]=a(`To enable authentication via OpenID Connect authorization code flow, set the configuration parameter DEV_DSF_SERVER_AUTH_OIDC_AUTHORIZATION_CODE_FLOW to true
and specify the following parameters:
Optionally, back channel logout can be enabled by setting DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT to true
. The DSF BPE server accepts logout tokens at DEV_DSF_BPE_SERVER_BASE_URL + /back-channel-logout
. The path can be modified via DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT_PATH.
A number of additional DEV_DSF_SERVER_AUTH_OIDC ...
configuration parameter are specify on the DSF BPE server configuration parameter page.
For example the configuration parameter DEV_DSF_SERVER_AUTH_OIDC_PROVIDER_CLIENT_TRUST_SERVER_CERTIFICATE_CAS can be used to specify a PEM encoded file with trusted root certificates to be used when accessing the OpenID Connect provider. If not specify the JVM default trusted root certificates are used for this connection.
services:
+ app:
+ image: ghcr.io/datasharingframework/bpe:1.5.0
+ # ...
+ secrets:
+ - keycloak_root_ca.pem
+ # ...
+ environment:
+ # ...
+ DEV_DSF_SERVER_AUTH_OIDC_AUTHORIZATION_CODE_FLOW: 'true'
+ DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT: 'true'
+ DEV_DSF_SERVER_AUTH_OIDC_BEARER_TOKEN: 'true'
+ DEV_DSF_SERVER_AUTH_OIDC_PROVIDER_REALM_BASE_URL: https://keycloak.test.org/realms/dsf
+ DEV_DSF_SERVER_AUTH_OIDC_PROVIDER_CLIENT_TRUST_SERVER_CERTIFICATE_CAS: /run/secrets/keycloak_root_ca.pem
+ DEV_DSF_SERVER_AUTH_OIDC_CLIENT_ID: dsf-bpe
+ DEV_DSF_SERVER_AUTH_OIDC_CLIENT_SECRET: n9bCMtjugv3Y_.szktXyQ2RH5se+J%o3
+ # ...
+secrets:
+ keycloak_root_ca.pem:
+ file: ./secrets/keycloak_root_ca.pem
Access to the DSF BPE server user interface can be configured via access control roles. By default users are only authenticated using X.509 client certificates, but authentication for local users via OAuth 2.0 OpenID Connect can also be enabled.
\\n"}');export{v as comp,f as data}; diff --git a/assets/oidc.html-BjNAiIJl.js b/assets/oidc.html-BjNAiIJl.js new file mode 100644 index 000000000..307c5c280 --- /dev/null +++ b/assets/oidc.html-BjNAiIJl.js @@ -0,0 +1,20 @@ +import{_ as t}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as r,a as s,b as e,d as a,e as l,f as o,r as h,o as d}from"./app-BIWb5uIp.js";const c={},p={class:"hint-container tip"};function k(u,i){const n=h("RouteLink");return d(),r("div",null,[i[6]||(i[6]=s('Access to the DSF FHIR server REST API and user interface can be configured via access control roles. By default users are only authenticated using X.509 client certificates, but authentication for local users via OAuth 2.0 OpenID Connect can also be enabled.
The DSF FHIR server supports Authorization Code Flow for the user interface as well as Bearer Token Authentication for the REST API. Back-Channel Logout is also supported.
',3)),e("div",p,[i[5]||(i[5]=e("p",{class:"hint-container-title"},"FHIR Reverse Proxy",-1)),e("p",null,[i[1]||(i[1]=a("The DSF FHIR reverse proxy requires client certificates by default. To use OpenID Connect authentication the configuration parameter ")),l(n,{to:"/stable/maintain/fhir/configuration/reverseproxy.html#ssl-verify-client"},{default:o(()=>i[0]||(i[0]=[a("SSL_VERIFY_CLIENT")])),_:1}),i[2]||(i[2]=a(" needs to be set to ")),i[3]||(i[3]=e("code",null,"optional",-1)),i[4]||(i[4]=a("."))])]),i[7]||(i[7]=s(`To enable authentication via OpenID Connect authorization code flow, set the configuration parameter DEV_DSF_SERVER_AUTH_OIDC_AUTHORIZATION_CODE_FLOW to true
and specify the following parameters:
Optionally, back channel logout can be enabled by setting DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT to true
. The DSF FHIR server accepts logout tokens at DEV_DSF_FHIR_SERVER_BASE_URL + /back-channel-logout
. The path can be modified via DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT_PATH.
To enable bearer token authentication, set the configuration parameter DEV_DSF_SERVER_AUTH_OIDC_BEARER_TOKEN to true
and specify the following parameter:
A number of additional DEV_DSF_SERVER_AUTH_OIDC ...
configuration parameter are specify on the DSF FHIR server configuration parameter page.
For example the configuration parameter DEV_DSF_SERVER_AUTH_OIDC_PROVIDER_CLIENT_TRUST_SERVER_CERTIFICATE_CAS can be used to specify a PEM encoded file with trusted root certificates to be used when accessing the OpenID Connect provider. If not specify the JVM default trusted root certificates are used for this connection.
services:
+ app:
+ image: ghcr.io/datasharingframework/fhir:1.5.2
+ # ...
+ secrets:
+ - keycloak_root_ca.pem
+ # ...
+ environment:
+ # ...
+ DEV_DSF_SERVER_AUTH_OIDC_AUTHORIZATION_CODE_FLOW: 'true'
+ DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT: 'true'
+ DEV_DSF_SERVER_AUTH_OIDC_BEARER_TOKEN: 'true'
+ DEV_DSF_SERVER_AUTH_OIDC_PROVIDER_REALM_BASE_URL: https://keycloak.test.org/realms/dsf
+ DEV_DSF_SERVER_AUTH_OIDC_PROVIDER_CLIENT_TRUST_SERVER_CERTIFICATE_CAS: /run/secrets/keycloak_root_ca.pem
+ DEV_DSF_SERVER_AUTH_OIDC_CLIENT_ID: dsf-fhir
+ DEV_DSF_SERVER_AUTH_OIDC_CLIENT_SECRET: n9bCMtjugv3Y_.szktXyQ2RH5se+J%o3
+ # ...
+secrets:
+ keycloak_root_ca.pem:
+ file: ./secrets/keycloak_root_ca.pem
Access to the DSF FHIR server REST API and user interface can be configured via access control roles. By default users are only authenticated using X.509 client certificates, but authentication for local users via OAuth 2.0 OpenID Connect can also be enabled.
"}');export{f as comp,A as data}; diff --git a/assets/oidc.html-BsSFj2Bs.js b/assets/oidc.html-BsSFj2Bs.js new file mode 100644 index 000000000..f78b3905e --- /dev/null +++ b/assets/oidc.html-BsSFj2Bs.js @@ -0,0 +1,20 @@ +import{_ as t}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as r,a as s,b as e,d as a,e as l,f as o,r as h,o as d}from"./app-BIWb5uIp.js";const c={},p={class:"hint-container tip"};function k(u,i){const n=h("RouteLink");return d(),r("div",null,[i[6]||(i[6]=s('Access to the DSF FHIR server REST API and user interface can be configured via access control roles. By default users are authenticated using X.509 client certificates, but authentication for local users via OAuth 2.0 OpenID Connect can also be enabled.
The DSF FHIR server supports Authorization Code Flow for the user interface as well as Bearer Token Authentication for the REST API. Back-Channel Logout is also supported.
',3)),e("div",p,[i[5]||(i[5]=e("p",{class:"hint-container-title"},"FHIR Reverse Proxy",-1)),e("p",null,[i[1]||(i[1]=a("The DSF FHIR reverse proxy requires client certificates by default. To use OpenID Connect authentication the configuration parameter ")),l(n,{to:"/v1.1.0/maintain/fhir/configuration/reverseproxy.html#ssl-verify-client"},{default:o(()=>i[0]||(i[0]=[a("SSL_VERIFY_CLIENT")])),_:1}),i[2]||(i[2]=a(" needs to be set to ")),i[3]||(i[3]=e("code",null,"optional",-1)),i[4]||(i[4]=a("."))])]),i[7]||(i[7]=s(`To enable authentication via OpenID Connect authorization code flow, set the configuration parameter DEV_DSF_SERVER_AUTH_OIDC_AUTHORIZATION_CODE_FLOW to true
and specify the following parameters:
Optionally, back channel logout can be enabled by setting DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT to true
. The DSF FHIR server accepts logout tokens at DEV_DSF_FHIR_SERVER_BASE_URL + /back-channel-logout
. The path can be modified via DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT_PATH.
To enable bearer token authentication, set the configuration parameter DEV_DSF_SERVER_AUTH_OIDC_BEARER_TOKEN to true
and specify the following parameter:
A number of additional DEV_DSF_SERVER_AUTH_OIDC ...
configuration parameter are specify on the DSF FHIR server configuration parameter page.
For example the configuration parameter DEV_DSF_SERVER_AUTH_OIDC_PROVIDER_CLIENT_TRUST_SERVER_CERTIFICATE_CAS can be used to specify a PEM encoded file with trusted root certificates to be used when accessing the OpenID Connect provider. If not specify the JVM default trusted root certificates are used for this connection.
services:
+ app:
+ image: ghcr.io/datasharingframework/fhir:1.1.0
+ # ...
+ secrets:
+ - keycloak_root_ca.pem
+ # ...
+ environment:
+ # ...
+ DEV_DSF_SERVER_AUTH_OIDC_AUTHORIZATION_CODE_FLOW: 'true'
+ DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT: 'true'
+ DEV_DSF_SERVER_AUTH_OIDC_BEARER_TOKEN: 'true'
+ DEV_DSF_SERVER_AUTH_OIDC_PROVIDER_REALM_BASE_URL: https://keycloak.test.org/realms/dsf
+ DEV_DSF_SERVER_AUTH_OIDC_PROVIDER_CLIENT_TRUST_SERVER_CERTIFICATE_CAS: /run/secrets/keycloak_root_ca.pem
+ DEV_DSF_SERVER_AUTH_OIDC_CLIENT_ID: dsf-fhir
+ DEV_DSF_SERVER_AUTH_OIDC_CLIENT_SECRET: n9bCMtjugv3Y_.szktXyQ2RH5se+J%o3
+ # ...
+secrets:
+ keycloak_root_ca.pem:
+ file: ./secrets/keycloak_root_ca.pem
Access to the DSF FHIR server REST API and user interface can be configured via access control roles. By default users are authenticated using X.509 client certificates, but authentication for local users via OAuth 2.0 OpenID Connect can also be enabled.
"}');export{f as comp,A as data}; diff --git a/assets/oidc.html-ByT9fRor.js b/assets/oidc.html-ByT9fRor.js new file mode 100644 index 000000000..83b4b7cb4 --- /dev/null +++ b/assets/oidc.html-ByT9fRor.js @@ -0,0 +1,20 @@ +import{_ as t}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as l,a,b as e,d as s,e as r,f as o,r as h,o as d}from"./app-BIWb5uIp.js";const p={},c={class:"hint-container tip"};function k(u,i){const n=h("RouteLink");return d(),l("div",null,[i[6]||(i[6]=a('Access to the DSF BPE server user interface can be configured via access control roles. By default users are only authenticated using X.509 client certificates, but authentication for local users via OAuth 2.0 OpenID Connect can also be enabled.
The DSF BPE server supports Authorization Code Flow for the user interface. Back-Channel Logout is also supported.
',3)),e("div",c,[i[5]||(i[5]=e("p",{class:"hint-container-title"},"BPE Reverse Proxy",-1)),e("p",null,[i[1]||(i[1]=s("The DSF BPE reverse proxy requires client certificates by default. To use OpenID Connect authentication the configuration parameter ")),r(n,{to:"/v1.7.0/maintain/bpe/configuration/reverseproxy.html#ssl-verify-client"},{default:o(()=>i[0]||(i[0]=[s("SSL_VERIFY_CLIENT")])),_:1}),i[2]||(i[2]=s(" needs to be set to ")),i[3]||(i[3]=e("code",null,"optional",-1)),i[4]||(i[4]=s("."))])]),i[7]||(i[7]=a(`To enable authentication via OpenID Connect authorization code flow, set the configuration parameter DEV_DSF_SERVER_AUTH_OIDC_AUTHORIZATION_CODE_FLOW to true
and specify the following parameters:
Optionally, back channel logout can be enabled by setting DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT to true
. The DSF BPE server accepts logout tokens at DEV_DSF_BPE_SERVER_BASE_URL + /back-channel-logout
. The path can be modified via DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT_PATH.
A number of additional DEV_DSF_SERVER_AUTH_OIDC ...
configuration parameter are specify on the DSF BPE server configuration parameter page.
For example the configuration parameter DEV_DSF_SERVER_AUTH_OIDC_PROVIDER_CLIENT_TRUST_SERVER_CERTIFICATE_CAS can be used to specify a PEM encoded file with trusted root certificates to be used when accessing the OpenID Connect provider. If not specify the JVM default trusted root certificates are used for this connection.
services:
+ app:
+ image: ghcr.io/datasharingframework/bpe:1.5.2
+ # ...
+ secrets:
+ - keycloak_root_ca.pem
+ # ...
+ environment:
+ # ...
+ DEV_DSF_SERVER_AUTH_OIDC_AUTHORIZATION_CODE_FLOW: 'true'
+ DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT: 'true'
+ DEV_DSF_SERVER_AUTH_OIDC_BEARER_TOKEN: 'true'
+ DEV_DSF_SERVER_AUTH_OIDC_PROVIDER_REALM_BASE_URL: https://keycloak.test.org/realms/dsf
+ DEV_DSF_SERVER_AUTH_OIDC_PROVIDER_CLIENT_TRUST_SERVER_CERTIFICATE_CAS: /run/secrets/keycloak_root_ca.pem
+ DEV_DSF_SERVER_AUTH_OIDC_CLIENT_ID: dsf-bpe
+ DEV_DSF_SERVER_AUTH_OIDC_CLIENT_SECRET: n9bCMtjugv3Y_.szktXyQ2RH5se+J%o3
+ # ...
+secrets:
+ keycloak_root_ca.pem:
+ file: ./secrets/keycloak_root_ca.pem
Access to the DSF BPE server user interface can be configured via access control roles. By default users are only authenticated using X.509 client certificates, but authentication for local users via OAuth 2.0 OpenID Connect can also be enabled.
\\n"}');export{v as comp,f as data}; diff --git a/assets/oidc.html-Cb81bVAU.js b/assets/oidc.html-Cb81bVAU.js new file mode 100644 index 000000000..db6a35602 --- /dev/null +++ b/assets/oidc.html-Cb81bVAU.js @@ -0,0 +1,20 @@ +import{_ as t}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as r,a as s,b as e,d as a,e as l,f as o,r as h,o as d}from"./app-BIWb5uIp.js";const c={},p={class:"hint-container tip"};function k(u,i){const n=h("RouteLink");return d(),r("div",null,[i[6]||(i[6]=s('Access to the DSF FHIR server REST API and user interface can be configured via access control roles. By default users are authenticated using X.509 client certificates, but authentication for local users via OAuth 2.0 OpenID Connect can also be enabled.
The DSF FHIR server supports Authorization Code Flow for the user interface as well as Bearer Token Authentication for the REST API. Back-Channel Logout is also supported.
',3)),e("div",p,[i[5]||(i[5]=e("p",{class:"hint-container-title"},"FHIR Reverse Proxy",-1)),e("p",null,[i[1]||(i[1]=a("The DSF FHIR reverse proxy requires client certificates by default. To use OpenID Connect authentication the configuration parameter ")),l(n,{to:"/v1.3.1/maintain/fhir/configuration/reverseproxy.html#ssl-verify-client"},{default:o(()=>i[0]||(i[0]=[a("SSL_VERIFY_CLIENT")])),_:1}),i[2]||(i[2]=a(" needs to be set to ")),i[3]||(i[3]=e("code",null,"optional",-1)),i[4]||(i[4]=a("."))])]),i[7]||(i[7]=s(`To enable authentication via OpenID Connect authorization code flow, set the configuration parameter DEV_DSF_SERVER_AUTH_OIDC_AUTHORIZATION_CODE_FLOW to true
and specify the following parameters:
Optionally, back channel logout can be enabled by setting DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT to true
. The DSF FHIR server accepts logout tokens at DEV_DSF_FHIR_SERVER_BASE_URL + /back-channel-logout
. The path can be modified via DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT_PATH.
To enable bearer token authentication, set the configuration parameter DEV_DSF_SERVER_AUTH_OIDC_BEARER_TOKEN to true
and specify the following parameter:
A number of additional DEV_DSF_SERVER_AUTH_OIDC ...
configuration parameter are specify on the DSF FHIR server configuration parameter page.
For example the configuration parameter DEV_DSF_SERVER_AUTH_OIDC_PROVIDER_CLIENT_TRUST_SERVER_CERTIFICATE_CAS can be used to specify a PEM encoded file with trusted root certificates to be used when accessing the OpenID Connect provider. If not specify the JVM default trusted root certificates are used for this connection.
services:
+ app:
+ image: ghcr.io/datasharingframework/fhir:1.3.1
+ # ...
+ secrets:
+ - keycloak_root_ca.pem
+ # ...
+ environment:
+ # ...
+ DEV_DSF_SERVER_AUTH_OIDC_AUTHORIZATION_CODE_FLOW: 'true'
+ DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT: 'true'
+ DEV_DSF_SERVER_AUTH_OIDC_BEARER_TOKEN: 'true'
+ DEV_DSF_SERVER_AUTH_OIDC_PROVIDER_REALM_BASE_URL: https://keycloak.test.org/realms/dsf
+ DEV_DSF_SERVER_AUTH_OIDC_PROVIDER_CLIENT_TRUST_SERVER_CERTIFICATE_CAS: /run/secrets/keycloak_root_ca.pem
+ DEV_DSF_SERVER_AUTH_OIDC_CLIENT_ID: dsf-fhir
+ DEV_DSF_SERVER_AUTH_OIDC_CLIENT_SECRET: n9bCMtjugv3Y_.szktXyQ2RH5se+J%o3
+ # ...
+secrets:
+ keycloak_root_ca.pem:
+ file: ./secrets/keycloak_root_ca.pem
Access to the DSF FHIR server REST API and user interface can be configured via access control roles. By default users are authenticated using X.509 client certificates, but authentication for local users via OAuth 2.0 OpenID Connect can also be enabled.
"}');export{f as comp,A as data}; diff --git a/assets/oidc.html-CkSwjsiH.js b/assets/oidc.html-CkSwjsiH.js new file mode 100644 index 000000000..d38757d81 --- /dev/null +++ b/assets/oidc.html-CkSwjsiH.js @@ -0,0 +1,20 @@ +import{_ as t}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as l,a,b as e,d as s,e as r,f as o,r as h,o as d}from"./app-BIWb5uIp.js";const p={},c={class:"hint-container tip"};function k(u,i){const n=h("RouteLink");return d(),l("div",null,[i[6]||(i[6]=a('Access to the DSF BPE server user interface can be configured via access control roles. By default users are only authenticated using X.509 client certificates, but authentication for local users via OAuth 2.0 OpenID Connect can also be enabled.
The DSF BPE server supports Authorization Code Flow for the user interface. Back-Channel Logout is also supported.
',3)),e("div",c,[i[5]||(i[5]=e("p",{class:"hint-container-title"},"BPE Reverse Proxy",-1)),e("p",null,[i[1]||(i[1]=s("The DSF BPE reverse proxy requires client certificates by default. To use OpenID Connect authentication the configuration parameter ")),r(n,{to:"/v1.5.2/maintain/bpe/configuration/reverseproxy.html#ssl-verify-client"},{default:o(()=>i[0]||(i[0]=[s("SSL_VERIFY_CLIENT")])),_:1}),i[2]||(i[2]=s(" needs to be set to ")),i[3]||(i[3]=e("code",null,"optional",-1)),i[4]||(i[4]=s("."))])]),i[7]||(i[7]=a(`To enable authentication via OpenID Connect authorization code flow, set the configuration parameter DEV_DSF_SERVER_AUTH_OIDC_AUTHORIZATION_CODE_FLOW to true
and specify the following parameters:
Optionally, back channel logout can be enabled by setting DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT to true
. The DSF BPE server accepts logout tokens at DEV_DSF_BPE_SERVER_BASE_URL + /back-channel-logout
. The path can be modified via DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT_PATH.
A number of additional DEV_DSF_SERVER_AUTH_OIDC ...
configuration parameter are specify on the DSF BPE server configuration parameter page.
For example the configuration parameter DEV_DSF_SERVER_AUTH_OIDC_PROVIDER_CLIENT_TRUST_SERVER_CERTIFICATE_CAS can be used to specify a PEM encoded file with trusted root certificates to be used when accessing the OpenID Connect provider. If not specify the JVM default trusted root certificates are used for this connection.
services:
+ app:
+ image: ghcr.io/datasharingframework/bpe:1.5.2
+ # ...
+ secrets:
+ - keycloak_root_ca.pem
+ # ...
+ environment:
+ # ...
+ DEV_DSF_SERVER_AUTH_OIDC_AUTHORIZATION_CODE_FLOW: 'true'
+ DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT: 'true'
+ DEV_DSF_SERVER_AUTH_OIDC_BEARER_TOKEN: 'true'
+ DEV_DSF_SERVER_AUTH_OIDC_PROVIDER_REALM_BASE_URL: https://keycloak.test.org/realms/dsf
+ DEV_DSF_SERVER_AUTH_OIDC_PROVIDER_CLIENT_TRUST_SERVER_CERTIFICATE_CAS: /run/secrets/keycloak_root_ca.pem
+ DEV_DSF_SERVER_AUTH_OIDC_CLIENT_ID: dsf-bpe
+ DEV_DSF_SERVER_AUTH_OIDC_CLIENT_SECRET: n9bCMtjugv3Y_.szktXyQ2RH5se+J%o3
+ # ...
+secrets:
+ keycloak_root_ca.pem:
+ file: ./secrets/keycloak_root_ca.pem
Access to the DSF BPE server user interface can be configured via access control roles. By default users are only authenticated using X.509 client certificates, but authentication for local users via OAuth 2.0 OpenID Connect can also be enabled.
\\n"}');export{v as comp,f as data}; diff --git a/assets/oidc.html-CobvVxkW.js b/assets/oidc.html-CobvVxkW.js new file mode 100644 index 000000000..3c95ae562 --- /dev/null +++ b/assets/oidc.html-CobvVxkW.js @@ -0,0 +1,20 @@ +import{_ as t}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as r,a as s,b as e,d as a,e as l,f as o,r as h,o as d}from"./app-BIWb5uIp.js";const c={},p={class:"hint-container tip"};function k(u,i){const n=h("RouteLink");return d(),r("div",null,[i[6]||(i[6]=s('Access to the DSF FHIR server REST API and user interface can be configured via access control roles. By default users are authenticated using X.509 client certificates, but authentication for local users via OAuth 2.0 OpenID Connect can also be enabled.
The DSF FHIR server supports Authorization Code Flow for the user interface as well as Bearer Token Authentication for the REST API. Back-Channel Logout is also supported.
',3)),e("div",p,[i[5]||(i[5]=e("p",{class:"hint-container-title"},"FHIR Reverse Proxy",-1)),e("p",null,[i[1]||(i[1]=a("The DSF FHIR reverse proxy requires client certificates by default. To use OpenID Connect authentication the configuration parameter ")),l(n,{to:"/v1.2.0/maintain/fhir/configuration/reverseproxy.html#ssl-verify-client"},{default:o(()=>i[0]||(i[0]=[a("SSL_VERIFY_CLIENT")])),_:1}),i[2]||(i[2]=a(" needs to be set to ")),i[3]||(i[3]=e("code",null,"optional",-1)),i[4]||(i[4]=a("."))])]),i[7]||(i[7]=s(`To enable authentication via OpenID Connect authorization code flow, set the configuration parameter DEV_DSF_SERVER_AUTH_OIDC_AUTHORIZATION_CODE_FLOW to true
and specify the following parameters:
Optionally, back channel logout can be enabled by setting DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT to true
. The DSF FHIR server accepts logout tokens at DEV_DSF_FHIR_SERVER_BASE_URL + /back-channel-logout
. The path can be modified via DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT_PATH.
To enable bearer token authentication, set the configuration parameter DEV_DSF_SERVER_AUTH_OIDC_BEARER_TOKEN to true
and specify the following parameter:
A number of additional DEV_DSF_SERVER_AUTH_OIDC ...
configuration parameter are specify on the DSF FHIR server configuration parameter page.
For example the configuration parameter DEV_DSF_SERVER_AUTH_OIDC_PROVIDER_CLIENT_TRUST_SERVER_CERTIFICATE_CAS can be used to specify a PEM encoded file with trusted root certificates to be used when accessing the OpenID Connect provider. If not specify the JVM default trusted root certificates are used for this connection.
services:
+ app:
+ image: ghcr.io/datasharingframework/fhir:1.2.0
+ # ...
+ secrets:
+ - keycloak_root_ca.pem
+ # ...
+ environment:
+ # ...
+ DEV_DSF_SERVER_AUTH_OIDC_AUTHORIZATION_CODE_FLOW: 'true'
+ DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT: 'true'
+ DEV_DSF_SERVER_AUTH_OIDC_BEARER_TOKEN: 'true'
+ DEV_DSF_SERVER_AUTH_OIDC_PROVIDER_REALM_BASE_URL: https://keycloak.test.org/realms/dsf
+ DEV_DSF_SERVER_AUTH_OIDC_PROVIDER_CLIENT_TRUST_SERVER_CERTIFICATE_CAS: /run/secrets/keycloak_root_ca.pem
+ DEV_DSF_SERVER_AUTH_OIDC_CLIENT_ID: dsf-fhir
+ DEV_DSF_SERVER_AUTH_OIDC_CLIENT_SECRET: n9bCMtjugv3Y_.szktXyQ2RH5se+J%o3
+ # ...
+secrets:
+ keycloak_root_ca.pem:
+ file: ./secrets/keycloak_root_ca.pem
Access to the DSF FHIR server REST API and user interface can be configured via access control roles. By default users are authenticated using X.509 client certificates, but authentication for local users via OAuth 2.0 OpenID Connect can also be enabled.
"}');export{f as comp,A as data}; diff --git a/assets/oidc.html-D1q5Feuz.js b/assets/oidc.html-D1q5Feuz.js new file mode 100644 index 000000000..6e1288c10 --- /dev/null +++ b/assets/oidc.html-D1q5Feuz.js @@ -0,0 +1,20 @@ +import{_ as t}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as l,a,b as e,d as s,e as r,f as o,r as h,o as d}from"./app-BIWb5uIp.js";const p={},c={class:"hint-container tip"};function k(u,i){const n=h("RouteLink");return d(),l("div",null,[i[6]||(i[6]=a('Access to the DSF BPE server user interface can be configured via access control roles. By default users are only authenticated using X.509 client certificates, but authentication for local users via OAuth 2.0 OpenID Connect can also be enabled.
The DSF BPE server supports Authorization Code Flow for the user interface. Back-Channel Logout is also supported.
',3)),e("div",c,[i[5]||(i[5]=e("p",{class:"hint-container-title"},"BPE Reverse Proxy",-1)),e("p",null,[i[1]||(i[1]=s("The DSF BPE reverse proxy requires client certificates by default. To use OpenID Connect authentication the configuration parameter ")),r(n,{to:"/v1.6.0/maintain/bpe/configuration/reverseproxy.html#ssl-verify-client"},{default:o(()=>i[0]||(i[0]=[s("SSL_VERIFY_CLIENT")])),_:1}),i[2]||(i[2]=s(" needs to be set to ")),i[3]||(i[3]=e("code",null,"optional",-1)),i[4]||(i[4]=s("."))])]),i[7]||(i[7]=a(`To enable authentication via OpenID Connect authorization code flow, set the configuration parameter DEV_DSF_SERVER_AUTH_OIDC_AUTHORIZATION_CODE_FLOW to true
and specify the following parameters:
Optionally, back channel logout can be enabled by setting DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT to true
. The DSF BPE server accepts logout tokens at DEV_DSF_BPE_SERVER_BASE_URL + /back-channel-logout
. The path can be modified via DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT_PATH.
A number of additional DEV_DSF_SERVER_AUTH_OIDC ...
configuration parameter are specify on the DSF BPE server configuration parameter page.
For example the configuration parameter DEV_DSF_SERVER_AUTH_OIDC_PROVIDER_CLIENT_TRUST_SERVER_CERTIFICATE_CAS can be used to specify a PEM encoded file with trusted root certificates to be used when accessing the OpenID Connect provider. If not specify the JVM default trusted root certificates are used for this connection.
services:
+ app:
+ image: ghcr.io/datasharingframework/bpe:1.5.2
+ # ...
+ secrets:
+ - keycloak_root_ca.pem
+ # ...
+ environment:
+ # ...
+ DEV_DSF_SERVER_AUTH_OIDC_AUTHORIZATION_CODE_FLOW: 'true'
+ DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT: 'true'
+ DEV_DSF_SERVER_AUTH_OIDC_BEARER_TOKEN: 'true'
+ DEV_DSF_SERVER_AUTH_OIDC_PROVIDER_REALM_BASE_URL: https://keycloak.test.org/realms/dsf
+ DEV_DSF_SERVER_AUTH_OIDC_PROVIDER_CLIENT_TRUST_SERVER_CERTIFICATE_CAS: /run/secrets/keycloak_root_ca.pem
+ DEV_DSF_SERVER_AUTH_OIDC_CLIENT_ID: dsf-bpe
+ DEV_DSF_SERVER_AUTH_OIDC_CLIENT_SECRET: n9bCMtjugv3Y_.szktXyQ2RH5se+J%o3
+ # ...
+secrets:
+ keycloak_root_ca.pem:
+ file: ./secrets/keycloak_root_ca.pem
Access to the DSF BPE server user interface can be configured via access control roles. By default users are only authenticated using X.509 client certificates, but authentication for local users via OAuth 2.0 OpenID Connect can also be enabled.
\\n"}');export{v as comp,f as data}; diff --git a/assets/oidc.html-DDUONRpW.js b/assets/oidc.html-DDUONRpW.js new file mode 100644 index 000000000..685cd7b97 --- /dev/null +++ b/assets/oidc.html-DDUONRpW.js @@ -0,0 +1,20 @@ +import{_ as t}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as r,a as s,b as e,d as a,e as l,f as o,r as h,o as d}from"./app-BIWb5uIp.js";const c={},p={class:"hint-container tip"};function k(u,i){const n=h("RouteLink");return d(),r("div",null,[i[6]||(i[6]=s('Access to the DSF FHIR server REST API and user interface can be configured via access control roles. By default users are only authenticated using X.509 client certificates, but authentication for local users via OAuth 2.0 OpenID Connect can also be enabled.
The DSF FHIR server supports Authorization Code Flow for the user interface as well as Bearer Token Authentication for the REST API. Back-Channel Logout is also supported.
',3)),e("div",p,[i[5]||(i[5]=e("p",{class:"hint-container-title"},"FHIR Reverse Proxy",-1)),e("p",null,[i[1]||(i[1]=a("The DSF FHIR reverse proxy requires client certificates by default. To use OpenID Connect authentication the configuration parameter ")),l(n,{to:"/v1.7.0/maintain/fhir/configuration/reverseproxy.html#ssl-verify-client"},{default:o(()=>i[0]||(i[0]=[a("SSL_VERIFY_CLIENT")])),_:1}),i[2]||(i[2]=a(" needs to be set to ")),i[3]||(i[3]=e("code",null,"optional",-1)),i[4]||(i[4]=a("."))])]),i[7]||(i[7]=s(`To enable authentication via OpenID Connect authorization code flow, set the configuration parameter DEV_DSF_SERVER_AUTH_OIDC_AUTHORIZATION_CODE_FLOW to true
and specify the following parameters:
Optionally, back channel logout can be enabled by setting DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT to true
. The DSF FHIR server accepts logout tokens at DEV_DSF_FHIR_SERVER_BASE_URL + /back-channel-logout
. The path can be modified via DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT_PATH.
To enable bearer token authentication, set the configuration parameter DEV_DSF_SERVER_AUTH_OIDC_BEARER_TOKEN to true
and specify the following parameter:
A number of additional DEV_DSF_SERVER_AUTH_OIDC ...
configuration parameter are specify on the DSF FHIR server configuration parameter page.
For example the configuration parameter DEV_DSF_SERVER_AUTH_OIDC_PROVIDER_CLIENT_TRUST_SERVER_CERTIFICATE_CAS can be used to specify a PEM encoded file with trusted root certificates to be used when accessing the OpenID Connect provider. If not specify the JVM default trusted root certificates are used for this connection.
services:
+ app:
+ image: ghcr.io/datasharingframework/fhir:1.5.2
+ # ...
+ secrets:
+ - keycloak_root_ca.pem
+ # ...
+ environment:
+ # ...
+ DEV_DSF_SERVER_AUTH_OIDC_AUTHORIZATION_CODE_FLOW: 'true'
+ DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT: 'true'
+ DEV_DSF_SERVER_AUTH_OIDC_BEARER_TOKEN: 'true'
+ DEV_DSF_SERVER_AUTH_OIDC_PROVIDER_REALM_BASE_URL: https://keycloak.test.org/realms/dsf
+ DEV_DSF_SERVER_AUTH_OIDC_PROVIDER_CLIENT_TRUST_SERVER_CERTIFICATE_CAS: /run/secrets/keycloak_root_ca.pem
+ DEV_DSF_SERVER_AUTH_OIDC_CLIENT_ID: dsf-fhir
+ DEV_DSF_SERVER_AUTH_OIDC_CLIENT_SECRET: n9bCMtjugv3Y_.szktXyQ2RH5se+J%o3
+ # ...
+secrets:
+ keycloak_root_ca.pem:
+ file: ./secrets/keycloak_root_ca.pem
Access to the DSF FHIR server REST API and user interface can be configured via access control roles. By default users are only authenticated using X.509 client certificates, but authentication for local users via OAuth 2.0 OpenID Connect can also be enabled.
"}');export{f as comp,A as data}; diff --git a/assets/oidc.html-DLVYL_s-.js b/assets/oidc.html-DLVYL_s-.js new file mode 100644 index 000000000..8432eda78 --- /dev/null +++ b/assets/oidc.html-DLVYL_s-.js @@ -0,0 +1,20 @@ +import{_ as t}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as r,a as s,b as e,d as a,e as l,f as o,r as h,o as d}from"./app-BIWb5uIp.js";const c={},p={class:"hint-container tip"};function k(u,i){const n=h("RouteLink");return d(),r("div",null,[i[6]||(i[6]=s('Access to the DSF FHIR server REST API and user interface can be configured via access control roles. By default users are only authenticated using X.509 client certificates, but authentication for local users via OAuth 2.0 OpenID Connect can also be enabled.
The DSF FHIR server supports Authorization Code Flow for the user interface as well as Bearer Token Authentication for the REST API. Back-Channel Logout is also supported.
',3)),e("div",p,[i[5]||(i[5]=e("p",{class:"hint-container-title"},"FHIR Reverse Proxy",-1)),e("p",null,[i[1]||(i[1]=a("The DSF FHIR reverse proxy requires client certificates by default. To use OpenID Connect authentication the configuration parameter ")),l(n,{to:"/v1.5.2/maintain/fhir/configuration/reverseproxy.html#ssl-verify-client"},{default:o(()=>i[0]||(i[0]=[a("SSL_VERIFY_CLIENT")])),_:1}),i[2]||(i[2]=a(" needs to be set to ")),i[3]||(i[3]=e("code",null,"optional",-1)),i[4]||(i[4]=a("."))])]),i[7]||(i[7]=s(`To enable authentication via OpenID Connect authorization code flow, set the configuration parameter DEV_DSF_SERVER_AUTH_OIDC_AUTHORIZATION_CODE_FLOW to true
and specify the following parameters:
Optionally, back channel logout can be enabled by setting DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT to true
. The DSF FHIR server accepts logout tokens at DEV_DSF_FHIR_SERVER_BASE_URL + /back-channel-logout
. The path can be modified via DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT_PATH.
To enable bearer token authentication, set the configuration parameter DEV_DSF_SERVER_AUTH_OIDC_BEARER_TOKEN to true
and specify the following parameter:
A number of additional DEV_DSF_SERVER_AUTH_OIDC ...
configuration parameters are specified on the DSF FHIR server configuration parameter page.
For example, the configuration parameter DEV_DSF_SERVER_AUTH_OIDC_PROVIDER_CLIENT_TRUST_SERVER_CERTIFICATE_CAS can be used to specify a PEM encoded file with trusted root certificates to be used when accessing the OpenID Connect provider. If not specified, the JVM default trusted root certificates are used for this connection.
services:
+ app:
+ image: ghcr.io/datasharingframework/fhir:1.5.2
+ # ...
+ secrets:
+ - keycloak_root_ca.pem
+ # ...
+ environment:
+ # ...
+ DEV_DSF_SERVER_AUTH_OIDC_AUTHORIZATION_CODE_FLOW: 'true'
+ DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT: 'true'
+ DEV_DSF_SERVER_AUTH_OIDC_BEARER_TOKEN: 'true'
+ DEV_DSF_SERVER_AUTH_OIDC_PROVIDER_REALM_BASE_URL: https://keycloak.test.org/realms/dsf
+ DEV_DSF_SERVER_AUTH_OIDC_PROVIDER_CLIENT_TRUST_SERVER_CERTIFICATE_CAS: /run/secrets/keycloak_root_ca.pem
+ DEV_DSF_SERVER_AUTH_OIDC_CLIENT_ID: dsf-fhir
+ DEV_DSF_SERVER_AUTH_OIDC_CLIENT_SECRET: n9bCMtjugv3Y_.szktXyQ2RH5se+J%o3
+ # ...
+secrets:
+ keycloak_root_ca.pem:
+ file: ./secrets/keycloak_root_ca.pem
Access to the DSF FHIR server REST API and user interface can be configured via access control roles. By default users are only authenticated using X.509 client certificates, but authentication for local users via OAuth 2.0 OpenID Connect can also be enabled.
"}');export{f as comp,A as data}; diff --git a/assets/oidc.html-DbJNU4iG.js b/assets/oidc.html-DbJNU4iG.js new file mode 100644 index 000000000..863670a8d --- /dev/null +++ b/assets/oidc.html-DbJNU4iG.js @@ -0,0 +1,20 @@ +import{_ as t}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as r,a as s,b as e,d as a,e as l,f as o,r as h,o as d}from"./app-BIWb5uIp.js";const c={},p={class:"hint-container tip"};function k(u,i){const n=h("RouteLink");return d(),r("div",null,[i[6]||(i[6]=s('Access to the DSF FHIR server REST API and user interface can be configured via access control roles. By default users are authenticated using X.509 client certificates, but authentication for local users via OAuth 2.0 OpenID Connect can also be enabled.
The DSF FHIR server supports Authorization Code Flow for the user interface as well as Bearer Token Authentication for the REST API. Back-Channel Logout is also supported.
',3)),e("div",p,[i[5]||(i[5]=e("p",{class:"hint-container-title"},"FHIR Reverse Proxy",-1)),e("p",null,[i[1]||(i[1]=a("The DSF FHIR reverse proxy requires client certificates by default. To use OpenID Connect authentication the configuration parameter ")),l(n,{to:"/v1.3.2/maintain/fhir/configuration/reverseproxy.html#ssl-verify-client"},{default:o(()=>i[0]||(i[0]=[a("SSL_VERIFY_CLIENT")])),_:1}),i[2]||(i[2]=a(" needs to be set to ")),i[3]||(i[3]=e("code",null,"optional",-1)),i[4]||(i[4]=a("."))])]),i[7]||(i[7]=s(`To enable authentication via OpenID Connect authorization code flow, set the configuration parameter DEV_DSF_SERVER_AUTH_OIDC_AUTHORIZATION_CODE_FLOW to true
and specify the following parameters:
Optionally, back channel logout can be enabled by setting DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT to true
. The DSF FHIR server accepts logout tokens at DEV_DSF_FHIR_SERVER_BASE_URL + /back-channel-logout
. The path can be modified via DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT_PATH.
To enable bearer token authentication, set the configuration parameter DEV_DSF_SERVER_AUTH_OIDC_BEARER_TOKEN to true
and specify the following parameter:
A number of additional DEV_DSF_SERVER_AUTH_OIDC ...
configuration parameters are specified on the DSF FHIR server configuration parameter page.
For example, the configuration parameter DEV_DSF_SERVER_AUTH_OIDC_PROVIDER_CLIENT_TRUST_SERVER_CERTIFICATE_CAS can be used to specify a PEM encoded file with trusted root certificates to be used when accessing the OpenID Connect provider. If not specified, the JVM default trusted root certificates are used for this connection.
services:
+ app:
+ image: ghcr.io/datasharingframework/fhir:1.3.2
+ # ...
+ secrets:
+ - keycloak_root_ca.pem
+ # ...
+ environment:
+ # ...
+ DEV_DSF_SERVER_AUTH_OIDC_AUTHORIZATION_CODE_FLOW: 'true'
+ DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT: 'true'
+ DEV_DSF_SERVER_AUTH_OIDC_BEARER_TOKEN: 'true'
+ DEV_DSF_SERVER_AUTH_OIDC_PROVIDER_REALM_BASE_URL: https://keycloak.test.org/realms/dsf
+ DEV_DSF_SERVER_AUTH_OIDC_PROVIDER_CLIENT_TRUST_SERVER_CERTIFICATE_CAS: /run/secrets/keycloak_root_ca.pem
+ DEV_DSF_SERVER_AUTH_OIDC_CLIENT_ID: dsf-fhir
+ DEV_DSF_SERVER_AUTH_OIDC_CLIENT_SECRET: n9bCMtjugv3Y_.szktXyQ2RH5se+J%o3
+ # ...
+secrets:
+ keycloak_root_ca.pem:
+ file: ./secrets/keycloak_root_ca.pem
Access to the DSF FHIR server REST API and user interface can be configured via access control roles. By default users are authenticated using X.509 client certificates, but authentication for local users via OAuth 2.0 OpenID Connect can also be enabled.
"}');export{f as comp,A as data}; diff --git a/assets/oidc.html-DlWufSfD.js b/assets/oidc.html-DlWufSfD.js new file mode 100644 index 000000000..5d6c67766 --- /dev/null +++ b/assets/oidc.html-DlWufSfD.js @@ -0,0 +1,20 @@ +import{_ as t}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as r,a as s,b as e,d as a,e as l,f as o,r as h,o as d}from"./app-BIWb5uIp.js";const c={},p={class:"hint-container tip"};function k(u,i){const n=h("RouteLink");return d(),r("div",null,[i[6]||(i[6]=s('Access to the DSF FHIR server REST API and user interface can be configured via access control roles. By default users are authenticated using X.509 client certificates, but authentication for local users via OAuth 2.0 OpenID Connect can also be enabled.
The DSF FHIR server supports Authorization Code Flow for the user interface as well as Bearer Token Authentication for the REST API. Back-Channel Logout is also supported.
',3)),e("div",p,[i[5]||(i[5]=e("p",{class:"hint-container-title"},"FHIR Reverse Proxy",-1)),e("p",null,[i[1]||(i[1]=a("The DSF FHIR reverse proxy requires client certificates by default. To use OpenID Connect authentication the configuration parameter ")),l(n,{to:"/v1.4.0/maintain/fhir/configuration/reverseproxy.html#ssl-verify-client"},{default:o(()=>i[0]||(i[0]=[a("SSL_VERIFY_CLIENT")])),_:1}),i[2]||(i[2]=a(" needs to be set to ")),i[3]||(i[3]=e("code",null,"optional",-1)),i[4]||(i[4]=a("."))])]),i[7]||(i[7]=s(`To enable authentication via OpenID Connect authorization code flow, set the configuration parameter DEV_DSF_SERVER_AUTH_OIDC_AUTHORIZATION_CODE_FLOW to true
and specify the following parameters:
Optionally, back channel logout can be enabled by setting DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT to true
. The DSF FHIR server accepts logout tokens at DEV_DSF_FHIR_SERVER_BASE_URL + /back-channel-logout
. The path can be modified via DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT_PATH.
To enable bearer token authentication, set the configuration parameter DEV_DSF_SERVER_AUTH_OIDC_BEARER_TOKEN to true
and specify the following parameter:
A number of additional DEV_DSF_SERVER_AUTH_OIDC ...
configuration parameters are specified on the DSF FHIR server configuration parameter page.
For example, the configuration parameter DEV_DSF_SERVER_AUTH_OIDC_PROVIDER_CLIENT_TRUST_SERVER_CERTIFICATE_CAS can be used to specify a PEM encoded file with trusted root certificates to be used when accessing the OpenID Connect provider. If not specified, the JVM default trusted root certificates are used for this connection.
services:
+ app:
+ image: ghcr.io/datasharingframework/fhir:1.4.0
+ # ...
+ secrets:
+ - keycloak_root_ca.pem
+ # ...
+ environment:
+ # ...
+ DEV_DSF_SERVER_AUTH_OIDC_AUTHORIZATION_CODE_FLOW: 'true'
+ DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT: 'true'
+ DEV_DSF_SERVER_AUTH_OIDC_BEARER_TOKEN: 'true'
+ DEV_DSF_SERVER_AUTH_OIDC_PROVIDER_REALM_BASE_URL: https://keycloak.test.org/realms/dsf
+ DEV_DSF_SERVER_AUTH_OIDC_PROVIDER_CLIENT_TRUST_SERVER_CERTIFICATE_CAS: /run/secrets/keycloak_root_ca.pem
+ DEV_DSF_SERVER_AUTH_OIDC_CLIENT_ID: dsf-fhir
+ DEV_DSF_SERVER_AUTH_OIDC_CLIENT_SECRET: n9bCMtjugv3Y_.szktXyQ2RH5se+J%o3
+ # ...
+secrets:
+ keycloak_root_ca.pem:
+ file: ./secrets/keycloak_root_ca.pem
Access to the DSF FHIR server REST API and user interface can be configured via access control roles. By default users are authenticated using X.509 client certificates, but authentication for local users via OAuth 2.0 OpenID Connect can also be enabled.
"}');export{f as comp,A as data}; diff --git a/assets/oidc.html-DyXjwCxs.js b/assets/oidc.html-DyXjwCxs.js new file mode 100644 index 000000000..1b9073f8e --- /dev/null +++ b/assets/oidc.html-DyXjwCxs.js @@ -0,0 +1,20 @@ +import{_ as t}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as l,a,b as e,d as s,e as r,f as o,r as h,o as d}from"./app-BIWb5uIp.js";const p={},c={class:"hint-container tip"};function k(u,i){const n=h("RouteLink");return d(),l("div",null,[i[6]||(i[6]=a('Access to the DSF BPE server user interface can be configured via access control roles. By default users are only authenticated using X.509 client certificates, but authentication for local users via OAuth 2.0 OpenID Connect can also be enabled.
The DSF BPE server supports Authorization Code Flow for the user interface. Back-Channel Logout is also supported.
',3)),e("div",c,[i[5]||(i[5]=e("p",{class:"hint-container-title"},"BPE Reverse Proxy",-1)),e("p",null,[i[1]||(i[1]=s("The DSF BPE reverse proxy requires client certificates by default. To use OpenID Connect authentication the configuration parameter ")),r(n,{to:"/stable/maintain/bpe/configuration/reverseproxy.html#ssl-verify-client"},{default:o(()=>i[0]||(i[0]=[s("SSL_VERIFY_CLIENT")])),_:1}),i[2]||(i[2]=s(" needs to be set to ")),i[3]||(i[3]=e("code",null,"optional",-1)),i[4]||(i[4]=s("."))])]),i[7]||(i[7]=a(`To enable authentication via OpenID Connect authorization code flow, set the configuration parameter DEV_DSF_SERVER_AUTH_OIDC_AUTHORIZATION_CODE_FLOW to true
and specify the following parameters:
Optionally, back channel logout can be enabled by setting DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT to true
. The DSF BPE server accepts logout tokens at DEV_DSF_BPE_SERVER_BASE_URL + /back-channel-logout
. The path can be modified via DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT_PATH.
A number of additional DEV_DSF_SERVER_AUTH_OIDC ...
configuration parameters are specified on the DSF BPE server configuration parameter page.
For example, the configuration parameter DEV_DSF_SERVER_AUTH_OIDC_PROVIDER_CLIENT_TRUST_SERVER_CERTIFICATE_CAS can be used to specify a PEM encoded file with trusted root certificates to be used when accessing the OpenID Connect provider. If not specified, the JVM default trusted root certificates are used for this connection.
services:
+ app:
+ image: ghcr.io/datasharingframework/bpe:1.5.2
+ # ...
+ secrets:
+ - keycloak_root_ca.pem
+ # ...
+ environment:
+ # ...
+ DEV_DSF_SERVER_AUTH_OIDC_AUTHORIZATION_CODE_FLOW: 'true'
+ DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT: 'true'
+ DEV_DSF_SERVER_AUTH_OIDC_BEARER_TOKEN: 'true'
+ DEV_DSF_SERVER_AUTH_OIDC_PROVIDER_REALM_BASE_URL: https://keycloak.test.org/realms/dsf
+ DEV_DSF_SERVER_AUTH_OIDC_PROVIDER_CLIENT_TRUST_SERVER_CERTIFICATE_CAS: /run/secrets/keycloak_root_ca.pem
+ DEV_DSF_SERVER_AUTH_OIDC_CLIENT_ID: dsf-bpe
+ DEV_DSF_SERVER_AUTH_OIDC_CLIENT_SECRET: n9bCMtjugv3Y_.szktXyQ2RH5se+J%o3
+ # ...
+secrets:
+ keycloak_root_ca.pem:
+ file: ./secrets/keycloak_root_ca.pem
Access to the DSF BPE server user interface can be configured via access control roles. By default users are only authenticated using X.509 client certificates, but authentication for local users via OAuth 2.0 OpenID Connect can also be enabled.
\\n"}');export{f as comp,g as data}; diff --git a/assets/oidc.html-ROX4yxMZ.js b/assets/oidc.html-ROX4yxMZ.js new file mode 100644 index 000000000..f7cccfb5f --- /dev/null +++ b/assets/oidc.html-ROX4yxMZ.js @@ -0,0 +1,20 @@ +import{_ as t}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as r,a as s,b as e,d as a,e as l,f as o,r as h,o as d}from"./app-BIWb5uIp.js";const c={},p={class:"hint-container tip"};function k(u,i){const n=h("RouteLink");return d(),r("div",null,[i[6]||(i[6]=s('Access to the DSF FHIR server REST API and user interface can be configured via access control roles. By default users are only authenticated using X.509 client certificates, but authentication for local users via OAuth 2.0 OpenID Connect can also be enabled.
The DSF FHIR server supports Authorization Code Flow for the user interface as well as Bearer Token Authentication for the REST API. Back-Channel Logout is also supported.
',3)),e("div",p,[i[5]||(i[5]=e("p",{class:"hint-container-title"},"FHIR Reverse Proxy",-1)),e("p",null,[i[1]||(i[1]=a("The DSF FHIR reverse proxy requires client certificates by default. To use OpenID Connect authentication the configuration parameter ")),l(n,{to:"/v1.5.1/maintain/fhir/configuration/reverseproxy.html#ssl-verify-client"},{default:o(()=>i[0]||(i[0]=[a("SSL_VERIFY_CLIENT")])),_:1}),i[2]||(i[2]=a(" needs to be set to ")),i[3]||(i[3]=e("code",null,"optional",-1)),i[4]||(i[4]=a("."))])]),i[7]||(i[7]=s(`To enable authentication via OpenID Connect authorization code flow, set the configuration parameter DEV_DSF_SERVER_AUTH_OIDC_AUTHORIZATION_CODE_FLOW to true
and specify the following parameters:
Optionally, back channel logout can be enabled by setting DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT to true
. The DSF FHIR server accepts logout tokens at DEV_DSF_FHIR_SERVER_BASE_URL + /back-channel-logout
. The path can be modified via DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT_PATH.
To enable bearer token authentication, set the configuration parameter DEV_DSF_SERVER_AUTH_OIDC_BEARER_TOKEN to true
and specify the following parameter:
A number of additional DEV_DSF_SERVER_AUTH_OIDC ...
configuration parameters are specified on the DSF FHIR server configuration parameter page.
For example, the configuration parameter DEV_DSF_SERVER_AUTH_OIDC_PROVIDER_CLIENT_TRUST_SERVER_CERTIFICATE_CAS can be used to specify a PEM encoded file with trusted root certificates to be used when accessing the OpenID Connect provider. If not specified, the JVM default trusted root certificates are used for this connection.
services:
+ app:
+ image: ghcr.io/datasharingframework/fhir:1.5.1
+ # ...
+ secrets:
+ - keycloak_root_ca.pem
+ # ...
+ environment:
+ # ...
+ DEV_DSF_SERVER_AUTH_OIDC_AUTHORIZATION_CODE_FLOW: 'true'
+ DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT: 'true'
+ DEV_DSF_SERVER_AUTH_OIDC_BEARER_TOKEN: 'true'
+ DEV_DSF_SERVER_AUTH_OIDC_PROVIDER_REALM_BASE_URL: https://keycloak.test.org/realms/dsf
+ DEV_DSF_SERVER_AUTH_OIDC_PROVIDER_CLIENT_TRUST_SERVER_CERTIFICATE_CAS: /run/secrets/keycloak_root_ca.pem
+ DEV_DSF_SERVER_AUTH_OIDC_CLIENT_ID: dsf-fhir
+ DEV_DSF_SERVER_AUTH_OIDC_CLIENT_SECRET: n9bCMtjugv3Y_.szktXyQ2RH5se+J%o3
+ # ...
+secrets:
+ keycloak_root_ca.pem:
+ file: ./secrets/keycloak_root_ca.pem
Access to the DSF FHIR server REST API and user interface can be configured via access control roles. By default users are only authenticated using X.509 client certificates, but authentication for local users via OAuth 2.0 OpenID Connect can also be enabled.
"}');export{f as comp,A as data}; diff --git a/assets/oidc.html-aVCCllMb.js b/assets/oidc.html-aVCCllMb.js new file mode 100644 index 000000000..6c068a06a --- /dev/null +++ b/assets/oidc.html-aVCCllMb.js @@ -0,0 +1,20 @@ +import{_ as t}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as r,a as s,b as e,d as a,e as l,f as o,r as h,o as d}from"./app-BIWb5uIp.js";const c={},p={class:"hint-container tip"};function k(u,i){const n=h("RouteLink");return d(),r("div",null,[i[6]||(i[6]=s('Access to the DSF FHIR server REST API and user interface can be configured via access control roles. By default users are authenticated using X.509 client certificates, but authentication for local users via OAuth 2.0 OpenID Connect can also be enabled.
The DSF FHIR server supports Authorization Code Flow for the user interface as well as Bearer Token Authentication for the REST API. Back-Channel Logout is also supported.
',3)),e("div",p,[i[5]||(i[5]=e("p",{class:"hint-container-title"},"FHIR Reverse Proxy",-1)),e("p",null,[i[1]||(i[1]=a("The DSF FHIR reverse proxy requires client certificates by default. To use OpenID Connect authentication the configuration parameter ")),l(n,{to:"/v1.3.0/maintain/fhir/configuration/reverseproxy.html#ssl-verify-client"},{default:o(()=>i[0]||(i[0]=[a("SSL_VERIFY_CLIENT")])),_:1}),i[2]||(i[2]=a(" needs to be set to ")),i[3]||(i[3]=e("code",null,"optional",-1)),i[4]||(i[4]=a("."))])]),i[7]||(i[7]=s(`To enable authentication via OpenID Connect authorization code flow, set the configuration parameter DEV_DSF_SERVER_AUTH_OIDC_AUTHORIZATION_CODE_FLOW to true
and specify the following parameters:
Optionally, back channel logout can be enabled by setting DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT to true
. The DSF FHIR server accepts logout tokens at DEV_DSF_FHIR_SERVER_BASE_URL + /back-channel-logout
. The path can be modified via DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT_PATH.
To enable bearer token authentication, set the configuration parameter DEV_DSF_SERVER_AUTH_OIDC_BEARER_TOKEN to true
and specify the following parameter:
A number of additional DEV_DSF_SERVER_AUTH_OIDC ...
configuration parameters are specified on the DSF FHIR server configuration parameter page.
For example, the configuration parameter DEV_DSF_SERVER_AUTH_OIDC_PROVIDER_CLIENT_TRUST_SERVER_CERTIFICATE_CAS can be used to specify a PEM encoded file with trusted root certificates to be used when accessing the OpenID Connect provider. If not specified, the JVM default trusted root certificates are used for this connection.
services:
+ app:
+ image: ghcr.io/datasharingframework/fhir:1.3.0
+ # ...
+ secrets:
+ - keycloak_root_ca.pem
+ # ...
+ environment:
+ # ...
+ DEV_DSF_SERVER_AUTH_OIDC_AUTHORIZATION_CODE_FLOW: 'true'
+ DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT: 'true'
+ DEV_DSF_SERVER_AUTH_OIDC_BEARER_TOKEN: 'true'
+ DEV_DSF_SERVER_AUTH_OIDC_PROVIDER_REALM_BASE_URL: https://keycloak.test.org/realms/dsf
+ DEV_DSF_SERVER_AUTH_OIDC_PROVIDER_CLIENT_TRUST_SERVER_CERTIFICATE_CAS: /run/secrets/keycloak_root_ca.pem
+ DEV_DSF_SERVER_AUTH_OIDC_CLIENT_ID: dsf-fhir
+ DEV_DSF_SERVER_AUTH_OIDC_CLIENT_SECRET: n9bCMtjugv3Y_.szktXyQ2RH5se+J%o3
+ # ...
+secrets:
+ keycloak_root_ca.pem:
+ file: ./secrets/keycloak_root_ca.pem
Access to the DSF FHIR server REST API and user interface can be configured via access control roles. By default users are authenticated using X.509 client certificates, but authentication for local users via OAuth 2.0 OpenID Connect can also be enabled.
"}');export{f as comp,A as data}; diff --git a/assets/oidc.html-eTv7P-ln.js b/assets/oidc.html-eTv7P-ln.js new file mode 100644 index 000000000..91df5d85b --- /dev/null +++ b/assets/oidc.html-eTv7P-ln.js @@ -0,0 +1,20 @@ +import{_ as t}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as r,a as s,b as e,d as a,e as l,f as o,r as h,o as d}from"./app-BIWb5uIp.js";const c={},p={class:"hint-container tip"};function k(u,i){const n=h("RouteLink");return d(),r("div",null,[i[6]||(i[6]=s('Access to the DSF FHIR server REST API and user interface can be configured via access control roles. By default users are only authenticated using X.509 client certificates, but authentication for local users via OAuth 2.0 OpenID Connect can also be enabled.
The DSF FHIR server supports Authorization Code Flow for the user interface as well as Bearer Token Authentication for the REST API. Back-Channel Logout is also supported.
',3)),e("div",p,[i[5]||(i[5]=e("p",{class:"hint-container-title"},"FHIR Reverse Proxy",-1)),e("p",null,[i[1]||(i[1]=a("The DSF FHIR reverse proxy requires client certificates by default. To use OpenID Connect authentication the configuration parameter ")),l(n,{to:"/v1.6.0/maintain/fhir/configuration/reverseproxy.html#ssl-verify-client"},{default:o(()=>i[0]||(i[0]=[a("SSL_VERIFY_CLIENT")])),_:1}),i[2]||(i[2]=a(" needs to be set to ")),i[3]||(i[3]=e("code",null,"optional",-1)),i[4]||(i[4]=a("."))])]),i[7]||(i[7]=s(`To enable authentication via OpenID Connect authorization code flow, set the configuration parameter DEV_DSF_SERVER_AUTH_OIDC_AUTHORIZATION_CODE_FLOW to true
and specify the following parameters:
Optionally, back channel logout can be enabled by setting DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT to true
. The DSF FHIR server accepts logout tokens at DEV_DSF_FHIR_SERVER_BASE_URL + /back-channel-logout
. The path can be modified via DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT_PATH.
To enable bearer token authentication, set the configuration parameter DEV_DSF_SERVER_AUTH_OIDC_BEARER_TOKEN to true
and specify the following parameter:
A number of additional DEV_DSF_SERVER_AUTH_OIDC ...
configuration parameters are specified on the DSF FHIR server configuration parameter page.
For example, the configuration parameter DEV_DSF_SERVER_AUTH_OIDC_PROVIDER_CLIENT_TRUST_SERVER_CERTIFICATE_CAS can be used to specify a PEM encoded file with trusted root certificates to be used when accessing the OpenID Connect provider. If not specified, the JVM default trusted root certificates are used for this connection.
services:
+ app:
+ image: ghcr.io/datasharingframework/fhir:1.5.2
+ # ...
+ secrets:
+ - keycloak_root_ca.pem
+ # ...
+ environment:
+ # ...
+ DEV_DSF_SERVER_AUTH_OIDC_AUTHORIZATION_CODE_FLOW: 'true'
+ DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT: 'true'
+ DEV_DSF_SERVER_AUTH_OIDC_BEARER_TOKEN: 'true'
+ DEV_DSF_SERVER_AUTH_OIDC_PROVIDER_REALM_BASE_URL: https://keycloak.test.org/realms/dsf
+ DEV_DSF_SERVER_AUTH_OIDC_PROVIDER_CLIENT_TRUST_SERVER_CERTIFICATE_CAS: /run/secrets/keycloak_root_ca.pem
+ DEV_DSF_SERVER_AUTH_OIDC_CLIENT_ID: dsf-fhir
+ DEV_DSF_SERVER_AUTH_OIDC_CLIENT_SECRET: n9bCMtjugv3Y_.szktXyQ2RH5se+J%o3
+ # ...
+secrets:
+ keycloak_root_ca.pem:
+ file: ./secrets/keycloak_root_ca.pem
Access to the DSF FHIR server REST API and user interface can be configured via access control roles. By default users are only authenticated using X.509 client certificates, but authentication for local users via OAuth 2.0 OpenID Connect can also be enabled.
"}');export{f as comp,A as data}; diff --git a/assets/oidc.html-rCbM1nU8.js b/assets/oidc.html-rCbM1nU8.js new file mode 100644 index 000000000..9d8d284f1 --- /dev/null +++ b/assets/oidc.html-rCbM1nU8.js @@ -0,0 +1,20 @@ +import{_ as t}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as r,a as s,b as e,d as a,e as l,f as o,r as h,o as d}from"./app-BIWb5uIp.js";const c={},p={class:"hint-container tip"};function k(u,i){const n=h("RouteLink");return d(),r("div",null,[i[6]||(i[6]=s('Access to the DSF FHIR server REST API and user interface can be configured via access control roles. By default users are only authenticated using X.509 client certificates, but authentication for local users via OAuth 2.0 OpenID Connect can also be enabled.
The DSF FHIR server supports Authorization Code Flow for the user interface as well as Bearer Token Authentication for the REST API. Back-Channel Logout is also supported.
',3)),e("div",p,[i[5]||(i[5]=e("p",{class:"hint-container-title"},"FHIR Reverse Proxy",-1)),e("p",null,[i[1]||(i[1]=a("The DSF FHIR reverse proxy requires client certificates by default. To use OpenID Connect authentication the configuration parameter ")),l(n,{to:"/v1.5.0/maintain/fhir/configuration/reverseproxy.html#ssl-verify-client"},{default:o(()=>i[0]||(i[0]=[a("SSL_VERIFY_CLIENT")])),_:1}),i[2]||(i[2]=a(" needs to be set to ")),i[3]||(i[3]=e("code",null,"optional",-1)),i[4]||(i[4]=a("."))])]),i[7]||(i[7]=s(`To enable authentication via OpenID Connect authorization code flow, set the configuration parameter DEV_DSF_SERVER_AUTH_OIDC_AUTHORIZATION_CODE_FLOW to true
and specify the following parameters:
Optionally, back channel logout can be enabled by setting DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT to true
. The DSF FHIR server accepts logout tokens at DEV_DSF_FHIR_SERVER_BASE_URL + /back-channel-logout
. The path can be modified via DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT_PATH.
To enable bearer token authentication, set the configuration parameter DEV_DSF_SERVER_AUTH_OIDC_BEARER_TOKEN to true
and specify the following parameter:
A number of additional DEV_DSF_SERVER_AUTH_OIDC ...
configuration parameters are specified on the DSF FHIR server configuration parameter page.
For example, the configuration parameter DEV_DSF_SERVER_AUTH_OIDC_PROVIDER_CLIENT_TRUST_SERVER_CERTIFICATE_CAS can be used to specify a PEM encoded file with trusted root certificates to be used when accessing the OpenID Connect provider. If not specified, the JVM default trusted root certificates are used for this connection.
services:
+ app:
+ image: ghcr.io/datasharingframework/fhir:1.5.0
+ # ...
+ secrets:
+ - keycloak_root_ca.pem
+ # ...
+ environment:
+ # ...
+ DEV_DSF_SERVER_AUTH_OIDC_AUTHORIZATION_CODE_FLOW: 'true'
+ DEV_DSF_SERVER_AUTH_OIDC_BACK_CHANNEL_LOGOUT: 'true'
+ DEV_DSF_SERVER_AUTH_OIDC_BEARER_TOKEN: 'true'
+ DEV_DSF_SERVER_AUTH_OIDC_PROVIDER_REALM_BASE_URL: https://keycloak.test.org/realms/dsf
+ DEV_DSF_SERVER_AUTH_OIDC_PROVIDER_CLIENT_TRUST_SERVER_CERTIFICATE_CAS: /run/secrets/keycloak_root_ca.pem
+ DEV_DSF_SERVER_AUTH_OIDC_CLIENT_ID: dsf-fhir
+ DEV_DSF_SERVER_AUTH_OIDC_CLIENT_SECRET: n9bCMtjugv3Y_.szktXyQ2RH5se+J%o3
+ # ...
+secrets:
+ keycloak_root_ca.pem:
+ file: ./secrets/keycloak_root_ca.pem
Access to the DSF FHIR server REST API and user interface can be configured via access control roles. By default users are only authenticated using X.509 client certificates, but authentication for local users via OAuth 2.0 OpenID Connect can also be enabled.
"}');export{f as comp,A as data}; diff --git a/assets/partners.html-CNkEzyU2.js b/assets/partners.html-CNkEzyU2.js new file mode 100644 index 000000000..fbb189b3d --- /dev/null +++ b/assets/partners.html-CNkEzyU2.js @@ -0,0 +1 @@ +import{_ as i}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as t,a as n,o as r}from"./app-BIWb5uIp.js";const a={};function o(s,e){return r(),t("div",null,e[0]||(e[0]=[n('Funded for further development of the Data Sharing Framework are:
The GECKO Institute is a research institution of Heilbronn University of Applied Sciences and conducts research and development at the interfaces between medicine, economics and computer science. The DSF is part of Prof. Fegeler's research area: Interoperability and Digital Processes in Medicine. Other areas of research include educational technologies, health economics, and consumer health informatics.
The Institute of Medical Informatics belongs to the University of Heidelberg. The scientific focus of the institute is on structured patient data, e.g.
The LIFE Institute of the University of Leipzig manages collaborative and follow-on projects in the fields of medicine, public health, medical informatics, and biobanking.
',8)]))}const d=i(a,[["render",o],["__file","partners.html.vue"]]),h=JSON.parse(`{"path":"/about/learnmore/partners.html","title":"Partners","lang":"en-US","frontmatter":{"title":"Partners","icon":"proxy","gitInclude":[]},"headers":[{"level":2,"title":"Heilbronn University of Applied Sciences | Gecko Institute","slug":"heilbronn-university-of-applied-sciences-gecko-institute","link":"#heilbronn-university-of-applied-sciences-gecko-institute","children":[]},{"level":2,"title":"University of Heidelberg | Institute of Medical Informatics","slug":"university-of-heidelberg-institute-of-medical-informatics","link":"#university-of-heidelberg-institute-of-medical-informatics","children":[]},{"level":2,"title":"University of Leipzig | LIFE Institute","slug":"university-of-leipzig-life-institute","link":"#university-of-leipzig-life-institute","children":[]}],"readingTime":{"minutes":0.59,"words":176},"filePathRelative":"about/learnmore/partners.md","excerpt":"Funded for further development of the Data Sharing Framework are:
\\nThe GECKO Institute is a research institution of Heilbronn University of Applied Sciences and conducts research and development at the interfaces between medicine, economics and computer science. The DSF is part of Prof. Fegeler's research area: Interoperability and Digital Processes in Medicine. Other areas of research include educational technologies, health economics, and consumer health informatics.
"}`);export{d as comp,h as data}; diff --git a/assets/passwords-secrets.html-CwvsK-ST.js b/assets/passwords-secrets.html-CwvsK-ST.js new file mode 100644 index 000000000..cf0c84f2a --- /dev/null +++ b/assets/passwords-secrets.html-CwvsK-ST.js @@ -0,0 +1,15 @@ +import{_ as s}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as i,a,o as n}from"./app-BIWb5uIp.js";const t={};function l(r,e){return n(),i("div",null,e[0]||(e[0]=[a(`Environment variables ending in _PASSWORD
or _SECRET
can be configured using plain-text files. To achieve this, environment variable should be defined with _FILE
appended to the name with the value defined as the location of the file. For all variables ending in _PASSWORD_FILE
or _SECRET_FILE
the DSF FHIR and DSF BPE applications will read the content of the first line of the referenced file and dynamically define the corresponding _PASSWORD
or _SECRET
environment variables with the read values.
/run/secrets/db_user_password
, the application will read the contents of the /run/secrets/db_user_password
file and set the DEV_DSF_BPE_DB_USER_PASSWORD
environment variable with the content from that file./run/secrets/oidc_client_secret
, the application will read the contents of the referenced file and set the DEV_DSF_SERVER_AUTH_OIDC_CLIENT_SECRET
environment variable accordingly.It is recommended to use docker secrets as files for these environment variables. Docker secrets are mounted as files in /run/secrets/<secret_name>
inside the container.
services:
+ app:
+ image: ghcr.io/datasharingframework/fhir
+ secrets:
+ - db_user_password
+ - oidc_client_secret
+ environment:
+ DEV_DSF_BPE_DB_USER_PASSWORD_FILE: /run/secrets/db_user_password
+ DEV_DSF_SERVER_AUTH_OIDC_CLIENT_SECRET_FILE: /run/secrets/oidc_client_secret
+
+secrets:
+ db_user_password:
+ file: ./secrets/db_user.password
+ oidc_client_secret:
+ file: ./secrets/oidc_client.secret
Environment variables ending in _PASSWORD
or _SECRET
can be configured using plain-text files. To achieve this, environment variable should be defined with _FILE
appended to the name with the value defined as the location of the file. For all variables ending in _PASSWORD_FILE
or _SECRET_FILE
the DSF FHIR and DSF BPE applications will read the content of the first line of the referenced file and dynamically define the corresponding _PASSWORD
or _SECRET
environment variables with the read values.
Environment variables ending in _PASSWORD
or _SECRET
can be configured using plain-text files. To achieve this, an environment variable should be defined with _FILE
appended to the name with the value defined as the location of the file. For all variables ending in _PASSWORD_FILE
or _SECRET_FILE
the DSF FHIR and DSF BPE applications will read the content of the first line of the referenced file and dynamically define the corresponding _PASSWORD
or _SECRET
environment variables with the read values.
/run/secrets/db_user_password
, the application will read the contents of the /run/secrets/db_user_password
file and set the DEV_DSF_BPE_DB_USER_PASSWORD
environment variable with the content from that file./run/secrets/oidc_client_secret
, the application will read the contents of the referenced file and set the DEV_DSF_SERVER_AUTH_OIDC_CLIENT_SECRET
environment variable accordingly.It is recommended to use docker secrets as files for these environment variables. Docker secrets are mounted as files in /run/secrets/<secret_name>
inside the container.
services:
+ app:
+ image: ghcr.io/datasharingframework/fhir
+ secrets:
+ - db_user_password
+ - oidc_client_secret
+ environment:
+ DEV_DSF_BPE_DB_USER_PASSWORD_FILE: /run/secrets/db_user_password
+ DEV_DSF_SERVER_AUTH_OIDC_CLIENT_SECRET_FILE: /run/secrets/oidc_client_secret
+
+secrets:
+ db_user_password:
+ file: ./secrets/db_user.password
+ api_key:
+ file: ./secrets/oidc_client.secret
Environment variables ending in _PASSWORD
or _SECRET
can be configured using plain-text files. To achieve this, an environment variable should be defined with _FILE
appended to the name with the value defined as the location of the file. For all variables ending in _PASSWORD_FILE
or _SECRET_FILE
the DSF FHIR and DSF BPE applications will read the content of the first line of the referenced file and dynamically define the corresponding _PASSWORD
or _SECRET
environment variables with the read values.
In order to be able to solve the exercises described in this tutorial a software development environment with git, Java 11, Maven 3.8, Docker, docker-compose, a Java IDE like Eclipse or IntelliJ, a BPMN Editor like the Camunda Modeler and a minimum of 16GB of RAM is needed.
git is a free and open source distributed version control system designed to handle everything from small to very large projects with speed and efficiency.
Processes for the DSF are written using the Java programming language in version 11. Various open source releases of the Java Developer Kit (JDK) 11 exist, you are free in your choice.
When implementing DSF processes, we use Maven 3.8 to manage the software project's build, reporting and documentation workflow.
In order to download DSF packages from the GitHub Package Registry using Maven you need a personal GitHub access token with scope read:packages
. This GitHub documentation shows you how to generate one.
After that, add the following server
configuration to your local .m2/settings.xml
. Replace USERNAME
with your GitHub username and TOKEN
with the previously generated personal GitHub access token. The token needs at least the read:packages
scope.
<settings xmlns="http://maven.apache.org/SETTINGS/1.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.0.0
+ http://maven.apache.org/xsd/settings-1.0.0.xsd">
+
+ ...
+
+ <servers>
+ <server>
+ <id>github</id>
+ <username>USERNAME</username>
+ <password>TOKEN</password>
+ </server>
+ </servers>
+</settings>
To be able to test the implemented processes, we use a test-setup based on Docker and docker-compose. This allows us to simulate multiple organizations with different roles and run the processes across "organizational boundaries".
The following entries are required in the hosts
file of your computer so that the FHIR servers of the simulated organizations can be accessed in your web browser. On Linux and Mac this file is located at /etc/hosts
. On Windows you can find it at C:\\Windows\\System32\\drivers\\etc\\hosts
127.0.0.1 dic
+127.0.0.1 cos
+127.0.0.1 hrp
For the development of the processes we recommend the use of an IDE, e.g. Eclipse or IntelliJ:
To simplify modeling of BPMN processes, we recommend a graphical editor, e.g. the Camunda Modeler:
The minimum hardware requirements to run all simulated organizations as part of the Docker test-setup is 16 GB of RAM.
In order to be able to solve the exercises described in this tutorial a software development environment with git, Java 11, Maven 3.8, Docker, docker-compose, a Java IDE like Eclipse or IntelliJ, a BPMN Editor like the Camunda Modeler and a minimum of 16GB of RAM is needed.
\\ngit is a free and open source distributed version control system designed to handle everything from small to very large projects with speed and efficiency.
"}');export{m as comp,v as data}; diff --git a/assets/process-plugins-advanced.html-Aiosif86.js b/assets/process-plugins-advanced.html-Aiosif86.js new file mode 100644 index 000000000..3627f6ce8 --- /dev/null +++ b/assets/process-plugins-advanced.html-Aiosif86.js @@ -0,0 +1 @@ +import{_ as e}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as s,o as n}from"./app-BIWb5uIp.js";const c={};function t(a,r){return n(),s("div")}const d=e(c,[["render",t],["__file","process-plugins-advanced.html.vue"]]),l=JSON.parse('{"path":"/v1.6.0/process-plugins-advanced.html","title":"Process Plugins Advanced","lang":"en-US","frontmatter":{"title":"Process Plugins Advanced","icon":"info","gitInclude":[]},"headers":[],"readingTime":{"minutes":0.02,"words":6},"filePathRelative":"v1.6.0/process-plugins-advanced.md","excerpt":""}');export{d as comp,l as data}; diff --git a/assets/process-plugins-advanced.html-BXZYZl-l.js b/assets/process-plugins-advanced.html-BXZYZl-l.js new file mode 100644 index 000000000..320669dcd --- /dev/null +++ b/assets/process-plugins-advanced.html-BXZYZl-l.js @@ -0,0 +1 @@ +import{_ as e}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as s,o as n}from"./app-BIWb5uIp.js";const t={};function c(a,r){return n(),s("div")}const d=e(t,[["render",c],["__file","process-plugins-advanced.html.vue"]]),l=JSON.parse('{"path":"/stable/process-plugins-advanced.html","title":"Process Plugins Advanced","lang":"en-US","frontmatter":{"title":"Process Plugins Advanced","icon":"info","gitInclude":[]},"headers":[],"readingTime":{"minutes":0.02,"words":6},"filePathRelative":"stable/process-plugins-advanced.md","excerpt":""}');export{d as comp,l as data}; diff --git a/assets/process-plugins-advanced.html-C4k17IeZ.js b/assets/process-plugins-advanced.html-C4k17IeZ.js new file mode 100644 index 000000000..c153412d3 --- /dev/null +++ b/assets/process-plugins-advanced.html-C4k17IeZ.js @@ -0,0 +1 @@ +import{_ as e}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as s,o as 
n}from"./app-BIWb5uIp.js";const c={};function t(a,r){return n(),s("div")}const d=e(c,[["render",t],["__file","process-plugins-advanced.html.vue"]]),l=JSON.parse('{"path":"/v1.7.0/process-plugins-advanced.html","title":"Process Plugins Advanced","lang":"en-US","frontmatter":{"title":"Process Plugins Advanced","icon":"info","gitInclude":[]},"headers":[],"readingTime":{"minutes":0.02,"words":6},"filePathRelative":"v1.7.0/process-plugins-advanced.md","excerpt":""}');export{d as comp,l as data}; diff --git a/assets/process-plugins-advanced.html-CryoSrgT.js b/assets/process-plugins-advanced.html-CryoSrgT.js new file mode 100644 index 000000000..fd04c39fb --- /dev/null +++ b/assets/process-plugins-advanced.html-CryoSrgT.js @@ -0,0 +1 @@ +import{_ as e}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as s,o as n}from"./app-BIWb5uIp.js";const c={};function t(a,r){return n(),s("div")}const d=e(c,[["render",t],["__file","process-plugins-advanced.html.vue"]]),l=JSON.parse('{"path":"/v1.3.2/process-plugins-advanced.html","title":"Process Plugins Advanced","lang":"en-US","frontmatter":{"title":"Process Plugins Advanced","icon":"info","gitInclude":[]},"headers":[],"readingTime":{"minutes":0.02,"words":6},"filePathRelative":"v1.3.2/process-plugins-advanced.md","excerpt":""}');export{d as comp,l as data}; diff --git a/assets/process-plugins-advanced.html-CvYyNIjF.js b/assets/process-plugins-advanced.html-CvYyNIjF.js new file mode 100644 index 000000000..4702beb74 --- /dev/null +++ b/assets/process-plugins-advanced.html-CvYyNIjF.js @@ -0,0 +1 @@ +import{_ as e}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as s,o as n}from"./app-BIWb5uIp.js";const c={};function t(a,r){return n(),s("div")}const d=e(c,[["render",t],["__file","process-plugins-advanced.html.vue"]]),l=JSON.parse('{"path":"/v1.1.0/process-plugins-advanced.html","title":"Process Plugins Advanced","lang":"en-US","frontmatter":{"title":"Process Plugins 
Advanced","icon":"info","gitInclude":[]},"headers":[],"readingTime":{"minutes":0.02,"words":6},"filePathRelative":"v1.1.0/process-plugins-advanced.md","excerpt":""}');export{d as comp,l as data}; diff --git a/assets/process-plugins-advanced.html-CvfI-O1o.js b/assets/process-plugins-advanced.html-CvfI-O1o.js new file mode 100644 index 000000000..48b0112d0 --- /dev/null +++ b/assets/process-plugins-advanced.html-CvfI-O1o.js @@ -0,0 +1 @@ +import{_ as e}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as s,o as n}from"./app-BIWb5uIp.js";const c={};function t(a,r){return n(),s("div")}const d=e(c,[["render",t],["__file","process-plugins-advanced.html.vue"]]),l=JSON.parse('{"path":"/v1.3.0/process-plugins-advanced.html","title":"Process Plugins Advanced","lang":"en-US","frontmatter":{"title":"Process Plugins Advanced","icon":"info","gitInclude":[]},"headers":[],"readingTime":{"minutes":0.02,"words":6},"filePathRelative":"v1.3.0/process-plugins-advanced.md","excerpt":""}');export{d as comp,l as data}; diff --git a/assets/process-plugins-advanced.html-DDT8mKAK.js b/assets/process-plugins-advanced.html-DDT8mKAK.js new file mode 100644 index 000000000..7d3d36db7 --- /dev/null +++ b/assets/process-plugins-advanced.html-DDT8mKAK.js @@ -0,0 +1 @@ +import{_ as e}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as s,o as n}from"./app-BIWb5uIp.js";const c={};function t(a,r){return n(),s("div")}const d=e(c,[["render",t],["__file","process-plugins-advanced.html.vue"]]),l=JSON.parse('{"path":"/v1.5.2/process-plugins-advanced.html","title":"Process Plugins Advanced","lang":"en-US","frontmatter":{"title":"Process Plugins Advanced","icon":"info","gitInclude":[]},"headers":[],"readingTime":{"minutes":0.02,"words":6},"filePathRelative":"v1.5.2/process-plugins-advanced.md","excerpt":""}');export{d as comp,l as data}; diff --git a/assets/process-plugins-advanced.html-DEwVR5E2.js b/assets/process-plugins-advanced.html-DEwVR5E2.js new file mode 100644 index 000000000..5f80eb87f --- 
/dev/null +++ b/assets/process-plugins-advanced.html-DEwVR5E2.js @@ -0,0 +1 @@ +import{_ as e}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as s,o as n}from"./app-BIWb5uIp.js";const c={};function t(a,r){return n(),s("div")}const d=e(c,[["render",t],["__file","process-plugins-advanced.html.vue"]]),l=JSON.parse('{"path":"/v1.5.1/process-plugins-advanced.html","title":"Process Plugins Advanced","lang":"en-US","frontmatter":{"title":"Process Plugins Advanced","icon":"info","gitInclude":[]},"headers":[],"readingTime":{"minutes":0.02,"words":6},"filePathRelative":"v1.5.1/process-plugins-advanced.md","excerpt":""}');export{d as comp,l as data}; diff --git a/assets/process-plugins-advanced.html-DK2hR7GM.js b/assets/process-plugins-advanced.html-DK2hR7GM.js new file mode 100644 index 000000000..d519e69e9 --- /dev/null +++ b/assets/process-plugins-advanced.html-DK2hR7GM.js @@ -0,0 +1 @@ +import{_ as e}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as s,o as n}from"./app-BIWb5uIp.js";const c={};function t(a,r){return n(),s("div")}const d=e(c,[["render",t],["__file","process-plugins-advanced.html.vue"]]),l=JSON.parse('{"path":"/v1.3.1/process-plugins-advanced.html","title":"Process Plugins Advanced","lang":"en-US","frontmatter":{"title":"Process Plugins Advanced","icon":"info","gitInclude":[]},"headers":[],"readingTime":{"minutes":0.02,"words":6},"filePathRelative":"v1.3.1/process-plugins-advanced.md","excerpt":""}');export{d as comp,l as data}; diff --git a/assets/process-plugins-advanced.html-DknbFNSU.js b/assets/process-plugins-advanced.html-DknbFNSU.js new file mode 100644 index 000000000..f81b1fbce --- /dev/null +++ b/assets/process-plugins-advanced.html-DknbFNSU.js @@ -0,0 +1 @@ +import{_ as e}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as s,o as n}from"./app-BIWb5uIp.js";const c={};function t(a,r){return n(),s("div")}const 
d=e(c,[["render",t],["__file","process-plugins-advanced.html.vue"]]),l=JSON.parse('{"path":"/v1.5.0/process-plugins-advanced.html","title":"Process Plugins Advanced","lang":"en-US","frontmatter":{"title":"Process Plugins Advanced","icon":"info","gitInclude":[]},"headers":[],"readingTime":{"minutes":0.02,"words":6},"filePathRelative":"v1.5.0/process-plugins-advanced.md","excerpt":""}');export{d as comp,l as data}; diff --git a/assets/process-plugins-advanced.html-DrS0U-59.js b/assets/process-plugins-advanced.html-DrS0U-59.js new file mode 100644 index 000000000..eee385aec --- /dev/null +++ b/assets/process-plugins-advanced.html-DrS0U-59.js @@ -0,0 +1 @@ +import{_ as e}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as s,o as n}from"./app-BIWb5uIp.js";const c={};function t(a,r){return n(),s("div")}const d=e(c,[["render",t],["__file","process-plugins-advanced.html.vue"]]),l=JSON.parse('{"path":"/v1.0.0/process-plugins-advanced.html","title":"Process Plugins Advanced","lang":"en-US","frontmatter":{"title":"Process Plugins Advanced","icon":"info","gitInclude":[]},"headers":[],"readingTime":{"minutes":0.02,"words":6},"filePathRelative":"v1.0.0/process-plugins-advanced.md","excerpt":""}');export{d as comp,l as data}; diff --git a/assets/process-plugins-advanced.html-Gkd0KZn-.js b/assets/process-plugins-advanced.html-Gkd0KZn-.js new file mode 100644 index 000000000..5a120b020 --- /dev/null +++ b/assets/process-plugins-advanced.html-Gkd0KZn-.js @@ -0,0 +1 @@ +import{_ as e}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as s,o as n}from"./app-BIWb5uIp.js";const c={};function t(a,r){return n(),s("div")}const d=e(c,[["render",t],["__file","process-plugins-advanced.html.vue"]]),l=JSON.parse('{"path":"/v1.2.0/process-plugins-advanced.html","title":"Process Plugins Advanced","lang":"en-US","frontmatter":{"title":"Process Plugins 
Advanced","icon":"info","gitInclude":[]},"headers":[],"readingTime":{"minutes":0.02,"words":6},"filePathRelative":"v1.2.0/process-plugins-advanced.md","excerpt":""}');export{d as comp,l as data}; diff --git a/assets/process-plugins-advanced.html-uS_KMulc.js b/assets/process-plugins-advanced.html-uS_KMulc.js new file mode 100644 index 000000000..26a79d8a3 --- /dev/null +++ b/assets/process-plugins-advanced.html-uS_KMulc.js @@ -0,0 +1 @@ +import{_ as e}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as s,o as n}from"./app-BIWb5uIp.js";const c={};function t(a,r){return n(),s("div")}const d=e(c,[["render",t],["__file","process-plugins-advanced.html.vue"]]),l=JSON.parse('{"path":"/v1.4.0/process-plugins-advanced.html","title":"Process Plugins Advanced","lang":"en-US","frontmatter":{"title":"Process Plugins Advanced","icon":"info","gitInclude":[]},"headers":[],"readingTime":{"minutes":0.02,"words":6},"filePathRelative":"v1.4.0/process-plugins-advanced.md","excerpt":""}');export{d as comp,l as data}; diff --git a/assets/process-plugins.html-BYVzwms7.js b/assets/process-plugins.html-BYVzwms7.js new file mode 100644 index 000000000..1ce422d0a --- /dev/null +++ b/assets/process-plugins.html-BYVzwms7.js @@ -0,0 +1 @@ +import{_ as s}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as i,a as o,o as n}from"./app-BIWb5uIp.js";const t="/photos/info/plugins/bpmn-example.png",a="/photos/info/use-cases/ping-pong.png",r={};function l(p,e){return n(),i("div",null,e[0]||(e[0]=[o('It is important to understand that the DSF is only the silent helper in the background: a middleware. The DSF is use case agnostic. This means that process plugins make it possible to execute almost any use case you can imagine with the DSF. Process plugins provide individual functionality. For example, it is possible to use the Ping Pong process to test bilateral communication or the Feasibility process to perform feasibility queries for research.
However, it is possible to deploy several process plugins together, even the same process plugin in different versions. A process plugin is basically an archive of BPMN 2.0 models, FHIR R4 resources and Java code. This process plugin is deployed as a Jar file on the BPE.
BPMN models can be created with Camunda Modeler. The following model is a BPMN model consisting of two lanes: These are the square boxes, i.e. Organization A and B. This process is intended only as an example to illustrate the formalities. We will look at realistic processes in the next chapter.
The ping process plugin can be used for (periodic) connection testing between organizations that are part of your DSF allow list. The following figure shows a representation of the process.
The ping pong process is composed of 3 different subprocesses:
The autostart ping process is used to execute connection tests in a predefined interval. This subprocess performs the following steps:
The ping process is used to check outgoing and incoming connections to organizations in your allow-list. This subprocess performs the following steps:
The pong process is used to send a response during the connection test to the requesting organization. This subprocess performs the following steps:
It is important to understand that the DSF is only the silent helper in the background: a middleware. The DSF is use case agnostic. This means that process plugins make it possible to execute almost any use case you can imagine with the DSF. Process plugins provide individual functionality. For example, it is possible to use the Ping Pong process to test bilateral communication or the Feasibility process to perform feasibility queries for research.
\\nHowever, it is possible to deploy several process plugins together, even the same process plugin in different versions. A process plugin is basically an archive of BPMN 2.0 models, FHIR R4 resources and Java code. This process plugin is deployed as a Jar file on the BPE.
If the BPE will be deployed behind a proxy, you can test the internet access of the BPE using the tool in dsf-tools/dsf-tools-proxy-test
.
The proxy test jar takes three input variables:
The proxy password will be asked as command line input. The name of the main class is ProxyTest
.
If the BPE will be deployed behind a proxy, you can test the internet access of the BPE using the tool in dsf-tools/dsf-tools-proxy-test
.
The proxy test jar takes three input variables:
\\nThe DSF Community connects the DSF competencies of the entire Medical Informatics Initiative (MII) so that new use cases, in particular from Module 3 or the Network University Medicine (NUM), are better supported in creating the respective DSF plugins. In the context of Module 2b, the DSF Community contributes to the further development of the common digital infrastructure of the MII together with the FDPG+ and TRANSIT projects. BMBF
Heilbronn University (HHN) focuses on the connecting of DSF related topics and competencies in the MII with the community management. The quality assurance of new DSF plugins and further development of the application and core components aim at a scalable, stable and secure operation of the DSF (Funding code: 01ZZ2307A).
Heidelberg University is focusing on the further development of the DSF core components as well as the rollout of the DSF plugins, in particular also from the perspective of a data integration center in consultation with the FDGP+ and TRANSIT projects (Funding code: 01ZZ2307B).
The University of Leipzig will further develop and adapt the DSF plugin for the Research Data Portal for Health FDPG according to specifications resulting from the FDPG+ project and enable Data Integration Centers (DIC) to operate the FDPG plugin (Funding code: 01ZZ2307C).
HiGHmed aims to increase the efficiency of clinical research and improve patient care through new medical informatics solutions and cross-organizational data exchange. The concept will be developed on the basis of three defined prototypical use cases, which will ensure close integration with the requirements of patient care and medical research. The GECKO Institute of HHN will establish a trusted third party with pseudonymization service to implement the goals of HiGHmed and is involved in the technical development of the DSF together with the partners (Funding code: 01ZZ1802E). Heidelberg University is in particular involved in the technical development of the DSF (Funding code: 01ZZ1802A).
',14)]))}const m=n(h,[["render",d],["__file","public.html.vue"]]),u=JSON.parse('{"path":"/about/learnmore/public.html","title":"Public Funding","lang":"en-US","frontmatter":{"title":"Public Funding","icon":"free","gitInclude":[]},"headers":[],"readingTime":{"minutes":1.3,"words":391},"filePathRelative":"about/learnmore/public.md","excerpt":"The DSF Community connects the DSF competencies of the entire Medical Informatics Initiative (MII) so that new use cases, in particular from Module 3 or the Network University Medicine (NUM), are better supported in creating the respective DSF plugins. In the context of Module 2b, the DSF Community contributes to the further development of the common digital infrastructure of the MII together with the FDPG+ and TRANSIT projects. BMBF
"}');export{m as comp,u as data}; diff --git a/assets/publications.html-B4U9UGg2.js b/assets/publications.html-B4U9UGg2.js new file mode 100644 index 000000000..63298d527 --- /dev/null +++ b/assets/publications.html-B4U9UGg2.js @@ -0,0 +1 @@ +import{_ as i}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as a,a as n,b as r,d as t,e as s,f as l,r as d,o as h}from"./app-BIWb5uIp.js";const p={};function c(H,e){const o=d("RouteLink");return h(),a("div",null,[e[3]||(e[3]=n('H. Hund, R. Wettstein, C.M. Heidt, C. Fegeler, Executing Distributed Healthcare and Research Processes – The HiGHmed Data Sharing Framework, Stud Health Technol Inform, 278 (2021) 126-133, doi:10.3233/SHTI210060
R. Wettstein, H. Hund, I. Kobylinski, C. Fegeler, O. Heinze, Feasibility Queries in Distributed Architectures – Concept and Implementation in HiGHmed, Stud Health Technol Inform, 278 (2021) 134-141, doi:10.3233/SHTI210061
C.M. Heidt, H. Hund, C. Fegeler, A Federated Record Linkage Algorithm for Secure Medical Data Sharing, Stud Health Technol Inform, 278 (2021) 142-149, doi:10.3233/SHTI210062
R. Wettstein, H. Hund, C. Fegeler, O. Heinze, Data Sharing in Distributed Architectures – Concept and Implementation in HiGHmed, Stud Health Technol Inform, 283 (2021) 111-118, doi:10.3233/SHTI210548
H.-U. Prokosch, T. Bahls, M. Bialke, J. Eils, C. Fegeler, J. Gruendner, B. Haarbrandt, C. Hampf, W. Hoffmann, H. Hund, et al. The COVID-19 Data Exchange Platform of the German University Medicine, Stud Health Technol Inform, 294 (2022) 674-678, doi:10.3233/SHTI220554
R. Wettstein, T. Kussel, H. Hund, C. Fegeler, M. Dugas, K. Hamacher, Secure Multi-Party Computation Based Distributed Feasibility Queries – A HiGHmed Use Case, Stud Health Technol Inform, 296 (2022) 41-49, doi:10.3233/SHTI220802
C. Zilske, M. Kurscheidt, S.T. Schweizer, H. Hund, S. Mödinger, C. Fegeler, Monitoring Distributed Business Processes in Biomedical Research, Stud Health Technol Inform, 302 (2023) 252-256, doi:10.3233/SHTI230113
H. Hund, R. Wettstein, C. Hampf, M. Bialke, M. Kurscheidt, S.T. Schweizer, C. Zilske, S. Mödinger, C. Fegeler, No Transfer Without Validation: A Data Sharing Framework Use Case, Stud Health Technol Inform, 302 (2023) 68-72, doi:10.3233/SHTI230066
H. Hund, R. Wettstein, M. Kurscheidt, S.T. Schweizer, C. Zilske, C. Fegeler, Interoperability Is a Process- The Data Sharing Framework, Stud Health Technol Inform, 310 (2024) 28-32, doi:10.3233/SHTI230921
S.T. Schweizer, H. Hund, M. Kurscheidt, C. Zilske, J.P. Böhringer, C. Fegeler, Handling Complexity in Decentralized Research Networks: The Data Sharing Framework Allowlist Management Application, Stud Health Technol Inform, 317 (2024) 85-93, doi:10.3233/SHTI240841
H. Hund, R. Wettstein, C.M. Heidt, C. Fegeler, Executing Distributed Healthcare and Research Processes – The HiGHmed Data Sharing Framework, Stud Health Technol Inform, 278 (2021) 126-133, doi:10.3233/SHTI210060
\\nR. Wettstein, H. Hund, I. Kobylinski, C. Fegeler, O. Heinze, Feasibility Queries in Distributed Architectures – Concept and Implementation in HiGHmed, Stud Health Technol Inform, 278 (2021) 134-141, doi:10.3233/SHTI210061
\\nC.M. Heidt, H. Hund, C. Fegeler, A Federated Record Linkage Algorithm for Secure Medical Data Sharing, Stud Health Technol Inform, 278 (2021) 142-149, doi:10.3233/SHTI210062
\\nR. Wettstein, H. Hund, C. Fegeler, O. Heinze, Data Sharing in Distributed Architectures – Concept and Implementation in HiGHmed, Stud Health Technol Inform, 283 (2021) 111-118, doi:10.3233/SHTI210548
\\nH.-U. Prokosch, T. Bahls, M. Bialke, J. Eils, C. Fegeler, J. Gruendner, B. Haarbrandt, C. Hampf, W. Hoffmann, H. Hund, et al. The COVID-19 Data Exchange Platform of the German University Medicine, Stud Health Technol Inform, 294 (2022) 674-678, doi:10.3233/SHTI220554
\\nR. Wettstein, T. Kussel, H. Hund, C. Fegeler, M. Dugas, K. Hamacher, Secure Multi-Party Computation Based Distributed Feasibility Queries – A HiGHmed Use Case, Stud Health Technol Inform, 296 (2022) 41-49, doi:10.3233/SHTI220802
\\nC. Zilske, M. Kurscheidt, S.T. Schweizer, H. Hund, S. Mödinger, C. Fegeler, Monitoring Distributed Business Processes in Biomedical Research, Stud Health Technol Inform, 302 (2023) 252-256, doi:10.3233/SHTI230113
\\nH. Hund, R. Wettstein, C. Hampf, M. Bialke, M. Kurscheidt, S.T. Schweizer, C. Zilske, S. Mödinger, C. Fegeler, No Transfer Without Validation: A Data Sharing Framework Use Case, Stud Health Technol Inform, 302 (2023) 68-72, doi:10.3233/SHTI230066
\\nH. Hund, R. Wettstein, M. Kurscheidt, S.T. Schweizer, C. Zilske, C. Fegeler, Interoperability Is a Process- The Data Sharing Framework, Stud Health Technol Inform, 310 (2024) 28-32, doi:10.3233/SHTI230921
\\nS.T. Schweizer, H. Hund, M. Kurscheidt, C. Zilske, J.P. Böhringer, C. Fegeler, Handling Complexity in Decentralized Research Networks: The Data Sharing Framework Allowlist Management Application, Stud Health Technol Inform, 317 (2024) 85-93, doi:10.3233/SHTI240841
\\n-SNAPSHOT
from version in pom.xml filesdraft
to active
and update dates, verify versions in FHIR resourcesversion
and date-released
in CITATION.cff-SNAPSHOT
in versions with -RC#
in pom.xml filesdraft
to active
and update dates, verify versions in FHIR resourcesversion
and date-released
in CITATION.cff-SNAPSHOT
from version in pom.xml filesdraft
to active
and update dates, verify versions in FHIR resourcesversion
and date-released
in CITATION.cffapp
, 172.28.1.3
443
my-external.fqdn:443
30
seconds30
seconds60
seconds60
secondsSSLCACertificateFile
/run/secrets/ssl_ca_certificate_file.pem
Acceptable client certificate CA names
send to the client, during TLS handshake, sets the apache httpd parameter SSLCADNRequestFile
; if omitted all entries from SSL_CA_CERTIFICATE_FILE
are used/run/secrets/ssl_ca_dn_request_file.pem
SSLCertificateChainFile
; can be omitted if either no chain is needed (aka self signed server certificate) or the file specified via SSL_CERTIFICATE_FILE
contains the certificate chain/run/secrets/ssl_certificate_chain_file.pem
SSLCertificateFile
/run/secrets/ssl_certificate_file.pem
SSLCertificateKeyFile
/run/secrets/ssl_certificate_key_file.pem
app
, 172.28.1.3
A number of trusted certificate authorities (CAs) are included in the DSF docker images fhir_proxy, fhir, bpe_proxy and bpe by default. Root and intermediate certificates as well as the configured usage of issuing CAs as either server, client oder server and client CA are listed at the end.
X.509 certificates of default trusted CAs are stored as .pem files containing multiple certificates in the docker images and can be replaced by either using docker bind mounts or configuring appropriate environment variables with different targets.
Defaults are configured for the list of issuing, intermediate and root CAs used for validating client certificates (Apache httpd mod_ssl configuration option SSLCACertificateFile) as well as the CA Certificates for defining acceptable CA names (option SSLCADNRequestFile).
Use the following environment variable to configure non default .pem files or override the existing files using docker bind mounts:
Note: Default file location are relative to the docker image work directory /opt/fhir
.
Defaults are configured for the list of issuing, intermediate and root CAs used for validating client certificates (Apache httpd mod_ssl configuration option SSLCACertificateFile) as well as the CA Certificates for defining acceptable CA names (option SSLCADNRequestFile).
Use the following environment variable to configure non default .pem files or override the existing files using docker bind mounts:
Note: Default file location are relative to the docker image work directory /opt/bpe
.
If not mentioned explicitly, issuing CAs listed will sign X.509 certificates with Extended Key Usage entries TLS WWW server authentication
and TLS WWW client authentication
.
A number of trusted certificate authorities (CAs) are included in the DSF docker images fhir_proxy, fhir, bpe_proxy and bpe by default. Root and intermediate certificates as well as the configured usage of issuing CAs as either server, client oder server and client CA are listed at the end.
"}');export{b as comp,C as data}; diff --git a/assets/root-certificates.html-DBTM8PxT.js b/assets/root-certificates.html-DBTM8PxT.js new file mode 100644 index 000000000..6ab0a72c1 --- /dev/null +++ b/assets/root-certificates.html-DBTM8PxT.js @@ -0,0 +1 @@ +import{_ as o}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as l,a,b as r,e as n,f as s,d as t,r as f,o as d}from"./app-BIWb5uIp.js";const p={};function h(u,e){const i=f("RouteLink");return d(),l("div",null,[e[77]||(e[77]=a('A number of trusted certificate authorities (CAs) are included in the DSF docker images fhir_proxy, fhir, bpe_proxy and bpe by default. Root and intermediate certificates as well as the configured usage of issuing CAs as either server, client oder server and client CA are listed at the end.
X.509 certificates of default trusted CAs are stored as .pem files containing multiple certificates in the docker images and can be replaced by either using docker bind mounts or configuring appropriate environment variables with different targets.
Defaults are configured for the list of issuing, intermediate and root CAs used for validating client certificates (Apache httpd mod_ssl configuration option SSLCACertificateFile) as well as the CA Certificates for defining acceptable CA names (option SSLCADNRequestFile).
Use the following environment variable to configure non default .pem files or override the existing files using docker bind mounts:
Note: Default file location are relative to the docker image work directory /opt/fhir
.
Defaults are configured for the list of issuing, intermediate and root CAs used for validating client certificates (Apache httpd mod_ssl configuration option SSLCACertificateFile) as well as the CA Certificates for defining acceptable CA names (option SSLCADNRequestFile).
Use the following environment variable to configure non default .pem files or override the existing files using docker bind mounts:
Note: Default file location are relative to the docker image work directory /opt/bpe
.
If not mentioned explicitly, issuing CAs listed will sign X.509 certificates with Extended Key Usage entries TLS WWW server authentication
and TLS WWW client authentication
.
A number of trusted certificate authorities (CAs) are included in the DSF docker images fhir_proxy, fhir, bpe_proxy and bpe by default. Root and intermediate certificates as well as the configured usage of issuing CAs as either server, client oder server and client CA are listed at the end.
"}');export{b as comp,C as data}; diff --git a/assets/security.html-DlCXhwuq.js b/assets/security.html-DlCXhwuq.js new file mode 100644 index 000000000..529b80cab --- /dev/null +++ b/assets/security.html-DlCXhwuq.js @@ -0,0 +1 @@ +import{_ as t}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as i,a,o as n}from"./app-BIWb5uIp.js";const r="/photos/info/security/certificates-light.svg",s="/photos/info/security/certificates-dark.svg",o={};function c(l,e){return n(),i("div",null,e[0]||(e[0]=[a('The open-source Data Sharing Framework is EU-GDPR compliant and meets the highest security standards by design. DSF FHIR servers only accept certain FHIR resources from internal systems/administrators (e.g. tasks, binary resources...). In addition, the communication partners are defined via Allow Lists. This means that an organisation can only communicate with organisations that are included in the allow list of approved organisations of the participating organisations. More information about allow lists can be found in the next chapter.
For transport encryption, the TLS protocol is used. Secure Web Socket (WSS) connections provide security for the connection between the DSF FHIR server (DMZ) and the BPE (internal network). In addition, the DSF is being actively developed and there is an excellent community, both of which guarantee fast security patches.
Authentication of organizations within the DSF is handled by the use of X.509 client and server certificates. Currently the certificate authorities run by DFN-PKI Global G2, D-Trust via TMF e.V. and GÉANT TCS via DFN are supported. All participating organizations are entered in a distributed and synchronized allow-list of valid organizations and certificates.
A webserver certificate is needed to run the FHIR endpoint and a 802.1X client certificate is used to authenticate against other organizations endpoints and as a server certificate for the business process engine. For available certificate profiles see DFN-PKI-Zertifikatprofile_Global.pdf
More information about client and server certificates can be found here.
Server certificates are used to authenticate the FHIR endpoint on the local network and to other organisations
More information: Parameters FHIR Server
Client certificates are used to authenticate against remote FHIR endpoints (when either the BPE server or the FHIR endpoint server acts as the client).
In addition, client certificates are used to authenticate the Business Process Engine server on the local network.
More information: Parameters BPE Server
',17)]))}const f=t(o,[["render",c],["__file","security.html.vue"]]),u=JSON.parse('{"path":"/intro/info/security.html","title":"Security by Design","lang":"en-US","frontmatter":{"title":"Security by Design","icon":"safe","gitInclude":[]},"headers":[{"level":2,"title":"Basics Security","slug":"basics-security","link":"#basics-security","children":[]},{"level":2,"title":"Authentication","slug":"authentication","link":"#authentication","children":[{"level":3,"title":"Certificate Requests 🔒","slug":"certificate-requests","link":"#certificate-requests","children":[]}]}],"readingTime":{"minutes":1.54,"words":461},"filePathRelative":"intro/info/security.md","excerpt":"The open-source Data Sharing Framework is EU-GDPR compliant and meets the highest security standards by design. DSF FHIR servers only accept certain FHIR resources from internal systems/administrators (e.g. tasks, binary resources...). In addition, the communication partners are defined via Allow Lists. This means that an organisation can only communicate with organisations that are included in the allow list of approved organisations of the participating organisations. More information about allow lists can be found in the next chapter.
\\nFor transport encryption, the TLS protocol is used. Secure Web Socket (WSS) connections provide security for the connection between the DSF FHIR server (DMZ) and the BPE (internal network). In addition, the DSF is being actively developed and there is an excellent community, both of which guarantee fast security patches.
The funded project team includes:
We thank additional contributors, including:
',4)]))}const p=r(a,[["render",l],["__file","team.html.vue"]]),s=JSON.parse('{"path":"/about/learnmore/team.html","title":"Team","lang":"en-US","frontmatter":{"title":"Team","icon":"group","gitInclude":[]},"headers":[],"readingTime":{"minutes":0.21,"words":62},"filePathRelative":"about/learnmore/team.md","excerpt":"The funded project team includes:
\\nA direct upgrade from DSF 0.9.x to DSF 1.x is not supported.
Do not use your 0.9.x configuration as starting point
There are too many changes between DSF 0.9.x and DSF 1.x to use the old configuration as starting point and just adapt some configuration parameter names.
Please use the new installation manual to perform a new installation and use the old setup only for reference.
Instead, please perform the following steps:
/opt/bpe
and /opt/fhir
to /opt/bpe_0.9
and /opt/fhir_0.9
)DEV_DSF
instead of ORG_HIGHMED
)./opt/fhir_0.9
and /opt/bpe_0.9
directories).New process plugins
Please do not copy your old process plugins into the new DSF. There will be a new release for each process plugin at the time you will be prompted to update your DSF instance.
Use your old virtual machine
We recommand the usage of your old DSF 0.9.x virtual machine for your new DSF 1.x setup. This will ensure that you already have the required firewall settings in place.
A direct upgrade from DSF 0.9.x to DSF 1.x is not supported.
\\nDo not use your 0.9.x configuration as starting point
\\nThere are too many changes between DSF 0.9.x and DSF 1.x to use the old configuration as starting point and just adapt some configuration parameter names.
\\nPlease use the new installation manual to perform a new installation and use the old setup only for reference.
\\nWork in progress
We are currently in the process of updating the written documentation on how to upgrade a DSF process plugin to DSF 1.0.0. In the meantime we recommend to take a look at the provided sample plugin and the upgraded DSF plugins:
Work in progress
\\nWe are currently in the process of updating the written documentation on how to upgrade a DSF process plugin to DSF 1.0.0. In the meantime we recommend to take a look at the provided sample plugin and the upgraded DSF plugins:
\\nDo not upgrade unless prompted!
Please do not upgrade your DSF installation from DSF 0.9.x to DSF 1.x unless prompted to do so (e.g. from the German MII).
A direct upgrade from DSF 0.9.x to DSF 1.x is not supported. Instead, please perform the following steps:
/opt/bpe
and /opt/fhir
to /opt/bpe_0.9
and /opt/fhir_0.9
)New process plugins
Please do not copy your old process plugins into the new DSF. There will be a new release for each process plugin at the time you will be prompted to update your DSF instance.
Do not upgrade unless prompted!
\\nPlease do not upgrade your DSF installation from DSF 0.9.x to DSF 1.x unless prompted to do so (e.g. from the German MII).
\\nA direct upgrade from DSF 0.9.x to DSF 1.x is not supported. Instead, please perform the following steps:
"}');export{d as comp,c as data}; diff --git a/assets/upgrade-from-0.html-BfIVWmGA.js b/assets/upgrade-from-0.html-BfIVWmGA.js new file mode 100644 index 000000000..8d114e7bb --- /dev/null +++ b/assets/upgrade-from-0.html-BfIVWmGA.js @@ -0,0 +1 @@ +import{_ as t}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as o,a as n,o as a}from"./app-BIWb5uIp.js";const i={};function r(s,e){return a(),o("div",null,e[0]||(e[0]=[n('A direct upgrade from DSF 0.9.x to DSF 1.x is not supported.
Do not use your 0.9.x configuration as starting point
There are too many changes between DSF 0.9.x and DSF 1.x to use the old configuration as starting point and just adapt some configuration parameter names.
Please use the new installation manual to perform a new installation and use the old setup only for reference.
Instead, please perform the following steps:
/opt/bpe
and /opt/fhir
to /opt/bpe_0.9
and /opt/fhir_0.9
)DEV_DSF
instead of ORG_HIGHMED
)./opt/fhir_0.9
and /opt/bpe_0.9
directories).New process plugins
Please do not copy your old process plugins into the new DSF. There will be a new release for each process plugin at the time you will be prompted to update your DSF instance.
Use your old virtual machine
We recommand the usage of your old DSF 0.9.x virtual machine for your new DSF 1.x setup. This will ensure that you already have the required firewall settings in place.
A direct upgrade from DSF 0.9.x to DSF 1.x is not supported.
\\nDo not use your 0.9.x configuration as starting point
\\nThere are too many changes between DSF 0.9.x and DSF 1.x to use the old configuration as starting point and just adapt some configuration parameter names.
\\nPlease use the new installation manual to perform a new installation and use the old setup only for reference.
\\nDo not upgrade unless prompted!
Please do not upgrade your DSF installation from DSF 0.9.x to DSF 1.x unless prompted to do so (e.g. from the German MII).
A direct upgrade from DSF 0.9.x to DSF 1.x is not supported. Instead, please perform the following steps:
/opt/bpe
and /opt/fhir
to /opt/bpe_0.9
and /opt/fhir_0.9
)New process plugins
Please do not copy your old process plugins into the new DSF. There will be a new release for each process plugin at the time you will be prompted to update your DSF instance.
Do not upgrade unless prompted!
\\nPlease do not upgrade your DSF installation from DSF 0.9.x to DSF 1.x unless prompted to do so (e.g. from the German MII).
\\nA direct upgrade from DSF 0.9.x to DSF 1.x is not supported. Instead, please perform the following steps:
"}');export{d as comp,c as data}; diff --git a/assets/upgrade-from-0.html-Bvg37v8B.js b/assets/upgrade-from-0.html-Bvg37v8B.js new file mode 100644 index 000000000..8fa495b11 --- /dev/null +++ b/assets/upgrade-from-0.html-Bvg37v8B.js @@ -0,0 +1 @@ +import{_ as t}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as o,a as n,o as a}from"./app-BIWb5uIp.js";const i={};function r(s,e){return a(),o("div",null,e[0]||(e[0]=[n('A direct upgrade from DSF 0.9.x to DSF 1.x is not supported.
Do not use your 0.9.x configuration as starting point
There are too many changes between DSF 0.9.x and DSF 1.x to use the old configuration as starting point and just adapt some configuration parameter names.
Please use the new installation manual to perform a new installation and use the old setup only for reference.
Instead, please perform the following steps:
/opt/bpe
and /opt/fhir
to /opt/bpe_0.9
and /opt/fhir_0.9
)DEV_DSF
instead of ORG_HIGHMED
)./opt/fhir_0.9
and /opt/bpe_0.9
directories).New process plugins
Please do not copy your old process plugins into the new DSF. There will be a new release for each process plugin at the time you will be prompted to update your DSF instance.
Use your old virtual machine
We recommand the usage of your old DSF 0.9.x virtual machine for your new DSF 1.x setup. This will ensure that you already have the required firewall settings in place.
A direct upgrade from DSF 0.9.x to DSF 1.x is not supported.
\\nDo not use your 0.9.x configuration as starting point
\\nThere are too many changes between DSF 0.9.x and DSF 1.x to use the old configuration as starting point and just adapt some configuration parameter names.
\\nPlease use the new installation manual to perform a new installation and use the old setup only for reference.
\\nA direct upgrade from DSF 0.9.x to DSF 1.x is not supported.
Do not use your 0.9.x configuration as starting point
There are too many changes between DSF 0.9.x and DSF 1.x to use the old configuration as starting point and just adapt some configuration parameter names.
Please use the new installation manual to perform a new installation and use the old setup only for reference.
Instead, please perform the following steps:
/opt/bpe
and /opt/fhir
to /opt/bpe_0.9
and /opt/fhir_0.9
)DEV_DSF
instead of ORG_HIGHMED
)./opt/fhir_0.9
and /opt/bpe_0.9
directories).New process plugins
Please do not copy your old process plugins into the new DSF. There will be a new release for each process plugin at the time you will be prompted to update your DSF instance.
Use your old virtual machine
We recommand the usage of your old DSF 0.9.x virtual machine for your new DSF 1.x setup. This will ensure that you already have the required firewall settings in place.
A direct upgrade from DSF 0.9.x to DSF 1.x is not supported.
\\nDo not use your 0.9.x configuration as starting point
\\nThere are too many changes between DSF 0.9.x and DSF 1.x to use the old configuration as starting point and just adapt some configuration parameter names.
\\nPlease use the new installation manual to perform a new installation and use the old setup only for reference.
\\nWork in progress
We are currently in the process of updating the written documentation on how to upgrade a DSF process plugin to DSF 1.0.0. In the meantime we recommend to take a look at the provided sample plugin and the upgraded DSF plugins:
Work in progress
\\nWe are currently in the process of updating the written documentation on how to upgrade a DSF process plugin to DSF 1.0.0. In the meantime we recommend to take a look at the provided sample plugin and the upgraded DSF plugins:
\\nA direct upgrade from DSF 0.9.x to DSF 1.x is not supported.
Do not use your 0.9.x configuration as starting point
There are too many changes between DSF 0.9.x and DSF 1.x to use the old configuration as starting point and just adapt some configuration parameter names.
Please use the new installation manual to perform a new installation and use the old setup only for reference.
Instead, please perform the following steps:
/opt/bpe
and /opt/fhir
to /opt/bpe_0.9
and /opt/fhir_0.9
)DEV_DSF
instead of ORG_HIGHMED
)./opt/fhir_0.9
and /opt/bpe_0.9
directories).New process plugins
Please do not copy your old process plugins into the new DSF. There will be a new release for each process plugin at the time you will be prompted to update your DSF instance.
Use your old virtual machine
We recommand the usage of your old DSF 0.9.x virtual machine for your new DSF 1.x setup. This will ensure that you already have the required firewall settings in place.
A direct upgrade from DSF 0.9.x to DSF 1.x is not supported.
\\nDo not use your 0.9.x configuration as starting point
\\nThere are too many changes between DSF 0.9.x and DSF 1.x to use the old configuration as starting point and just adapt some configuration parameter names.
\\nPlease use the new installation manual to perform a new installation and use the old setup only for reference.
\\nWork in progress
We are currently in the process of updating the written documentation on how to upgrade a DSF process plugin to DSF 1.0.0. In the meantime we recommend to take a look at the provided sample plugin and the upgraded DSF plugins:
Work in progress
\\nWe are currently in the process of updating the written documentation on how to upgrade a DSF process plugin to DSF 1.0.0. In the meantime we recommend to take a look at the provided sample plugin and the upgraded DSF plugins:
\\nA direct upgrade from DSF 0.9.x to DSF 1.x is not supported.
Do not use your 0.9.x configuration as starting point
There are too many changes between DSF 0.9.x and DSF 1.x to use the old configuration as starting point and just adapt some configuration parameter names.
Please use the new installation manual to perform a new installation and use the old setup only for reference.
Instead, please perform the following steps:
/opt/bpe
and /opt/fhir
to /opt/bpe_0.9
and /opt/fhir_0.9
)DEV_DSF
instead of ORG_HIGHMED
)./opt/fhir_0.9
and /opt/bpe_0.9
directories).New process plugins
Please do not copy your old process plugins into the new DSF. There will be a new release for each process plugin at the time you will be prompted to update your DSF instance.
Use your old virtual machine
We recommand the usage of your old DSF 0.9.x virtual machine for your new DSF 1.x setup. This will ensure that you already have the required firewall settings in place.
A direct upgrade from DSF 0.9.x to DSF 1.x is not supported.
\\nDo not use your 0.9.x configuration as starting point
\\nThere are too many changes between DSF 0.9.x and DSF 1.x to use the old configuration as starting point and just adapt some configuration parameter names.
\\nPlease use the new installation manual to perform a new installation and use the old setup only for reference.
\\nWork in progress
We are currently in the process of updating the written documentation on how to upgrade a DSF process plugin to DSF 1.0.0. In the meantime we recommend to take a look at the provided sample plugin and the upgraded DSF plugins:
Work in progress
\\nWe are currently in the process of updating the written documentation on how to upgrade a DSF process plugin to DSF 1.0.0. In the meantime we recommend to take a look at the provided sample plugin and the upgraded DSF plugins:
\\nWork in progress
We are currently in the process of updating the written documentation on how to upgrade a DSF process plugin to DSF 1.0.0. In the meantime we recommend to take a look at the provided sample plugin and the upgraded DSF plugins:
Work in progress
\\nWe are currently in the process of updating the written documentation on how to upgrade a DSF process plugin to DSF 1.0.0. In the meantime we recommend to take a look at the provided sample plugin and the upgraded DSF plugins:
\\nA direct upgrade from DSF 0.9.x to DSF 1.x is not supported.
Do not use your 0.9.x configuration as starting point
There are too many changes between DSF 0.9.x and DSF 1.x to use the old configuration as starting point and just adapt some configuration parameter names.
Please use the new installation manual to perform a new installation and use the old setup only for reference.
Instead, please perform the following steps:
/opt/bpe
and /opt/fhir
to /opt/bpe_0.9
and /opt/fhir_0.9
)DEV_DSF
instead of ORG_HIGHMED
)./opt/fhir_0.9
and /opt/bpe_0.9
directories).New process plugins
Please do not copy your old process plugins into the new DSF. There will be a new release for each process plugin at the time you will be prompted to update your DSF instance.
Use your old virtual machine
We recommand the usage of your old DSF 0.9.x virtual machine for your new DSF 1.x setup. This will ensure that you already have the required firewall settings in place.
A direct upgrade from DSF 0.9.x to DSF 1.x is not supported.
\\nDo not use your 0.9.x configuration as starting point
\\nThere are too many changes between DSF 0.9.x and DSF 1.x to use the old configuration as starting point and just adapt some configuration parameter names.
\\nPlease use the new installation manual to perform a new installation and use the old setup only for reference.
\\nA direct upgrade from DSF 0.9.x to DSF 1.x is not supported.
Do not use your 0.9.x configuration as starting point
There are too many changes between DSF 0.9.x and DSF 1.x to use the old configuration as starting point and just adapt some configuration parameter names.
Please use the new installation manual to perform a new installation and use the old setup only for reference.
Instead, please perform the following steps:
/opt/bpe
and /opt/fhir
to /opt/bpe_0.9
and /opt/fhir_0.9
)DEV_DSF
instead of ORG_HIGHMED
)./opt/fhir_0.9
and /opt/bpe_0.9
directories).New process plugins
Please do not copy your old process plugins into the new DSF. There will be a new release for each process plugin at the time you will be prompted to update your DSF instance.
Use your old virtual machine
We recommand the usage of your old DSF 0.9.x virtual machine for your new DSF 1.x setup. This will ensure that you already have the required firewall settings in place.
A direct upgrade from DSF 0.9.x to DSF 1.x is not supported.
\\nDo not use your 0.9.x configuration as starting point
\\nThere are too many changes between DSF 0.9.x and DSF 1.x to use the old configuration as starting point and just adapt some configuration parameter names.
\\nPlease use the new installation manual to perform a new installation and use the old setup only for reference.
\\nA direct upgrade from DSF 0.9.x to DSF 1.x is not supported.
Do not use your 0.9.x configuration as starting point
There are too many changes between DSF 0.9.x and DSF 1.x to use the old configuration as starting point and just adapt some configuration parameter names.
Please use the new installation manual to perform a new installation and use the old setup only for reference.
Instead, please perform the following steps:
/opt/bpe
and /opt/fhir
to /opt/bpe_0.9
and /opt/fhir_0.9
)DEV_DSF
instead of ORG_HIGHMED
)./opt/fhir_0.9
and /opt/bpe_0.9
directories).New process plugins
Please do not copy your old process plugins into the new DSF. There will be a new release for each process plugin at the time you will be prompted to update your DSF instance.
Use your old virtual machine
We recommand the usage of your old DSF 0.9.x virtual machine for your new DSF 1.x setup. This will ensure that you already have the required firewall settings in place.
A direct upgrade from DSF 0.9.x to DSF 1.x is not supported.
\\nDo not use your 0.9.x configuration as starting point
\\nThere are too many changes between DSF 0.9.x and DSF 1.x to use the old configuration as starting point and just adapt some configuration parameter names.
\\nPlease use the new installation manual to perform a new installation and use the old setup only for reference.
\\nWork in progress
We are currently in the process of updating the written documentation on how to upgrade a DSF process plugin to DSF 1.0.0. In the meantime we recommend to take a look at the provided sample plugin and the upgraded DSF plugins:
Work in progress
\\nWe are currently in the process of updating the written documentation on how to upgrade a DSF process plugin to DSF 1.0.0. In the meantime we recommend to take a look at the provided sample plugin and the upgraded DSF plugins:
\\nA direct upgrade from DSF 0.9.x to DSF 1.x is not supported.
Do not use your 0.9.x configuration as starting point
There are too many changes between DSF 0.9.x and DSF 1.x to use the old configuration as starting point and just adapt some configuration parameter names.
Please use the new installation manual to perform a new installation and use the old setup only for reference.
Instead, please perform the following steps:
/opt/bpe
and /opt/fhir
to /opt/bpe_0.9
and /opt/fhir_0.9
)DEV_DSF
instead of ORG_HIGHMED
)./opt/fhir_0.9
and /opt/bpe_0.9
directories).New process plugins
Please do not copy your old process plugins into the new DSF. There will be a new release for each process plugin at the time you will be prompted to update your DSF instance.
Use your old virtual machine
We recommand the usage of your old DSF 0.9.x virtual machine for your new DSF 1.x setup. This will ensure that you already have the required firewall settings in place.
A direct upgrade from DSF 0.9.x to DSF 1.x is not supported.
\\nDo not use your 0.9.x configuration as starting point
\\nThere are too many changes between DSF 0.9.x and DSF 1.x to use the old configuration as starting point and just adapt some configuration parameter names.
\\nPlease use the new installation manual to perform a new installation and use the old setup only for reference.
\\nWork in progress
We are currently in the process of updating the written documentation on how to upgrade a DSF process plugin to DSF 1.0.0. In the meantime we recommend to take a look at the provided sample plugin and the upgraded DSF plugins:
Work in progress
\\nWe are currently in the process of updating the written documentation on how to upgrade a DSF process plugin to DSF 1.0.0. In the meantime we recommend to take a look at the provided sample plugin and the upgraded DSF plugins:
\\nWork in progress
We are currently in the process of updating the written documentation on how to upgrade a DSF process plugin to DSF 1.0.0. In the meantime we recommend to take a look at the provided sample plugin and the upgraded DSF plugins:
Work in progress
\\nWe are currently in the process of updating the written documentation on how to upgrade a DSF process plugin to DSF 1.0.0. In the meantime we recommend to take a look at the provided sample plugin and the upgraded DSF plugins:
\\nWork in progress
We are currently in the process of updating the written documentation on how to upgrade a DSF process plugin to DSF 1.0.0. In the meantime we recommend to take a look at the provided sample plugin and the upgraded DSF plugins:
Work in progress
\\nWe are currently in the process of updating the written documentation on how to upgrade a DSF process plugin to DSF 1.0.0. In the meantime we recommend to take a look at the provided sample plugin and the upgraded DSF plugins:
\\nWork in progress
We are currently in the process of updating the written documentation on how to upgrade a DSF process plugin to DSF 1.0.0. In the meantime we recommend to take a look at the provided sample plugin and the upgraded DSF plugins:
Work in progress
\\nWe are currently in the process of updating the written documentation on how to upgrade a DSF process plugin to DSF 1.0.0. In the meantime we recommend to take a look at the provided sample plugin and the upgraded DSF plugins:
\\nWork in progress
We are currently in the process of updating the written documentation on how to upgrade a DSF process plugin to DSF 1.0.0. In the meantime we recommend to take a look at the provided sample plugin and the upgraded DSF plugins:
Work in progress
\\nWe are currently in the process of updating the written documentation on how to upgrade a DSF process plugin to DSF 1.0.0. In the meantime we recommend to take a look at the provided sample plugin and the upgraded DSF plugins:
\\nWork in progress
We are currently in the process of updating the written documentation on how to upgrade a DSF process plugin to DSF 1.0.0. In the meantime we recommend to take a look at the provided sample plugin and the upgraded DSF plugins:
Work in progress
\\nWe are currently in the process of updating the written documentation on how to upgrade a DSF process plugin to DSF 1.0.0. In the meantime we recommend to take a look at the provided sample plugin and the upgraded DSF plugins:
\\nWork in progress
We are currently in the process of updating the written documentation on how to upgrade a DSF process plugin to DSF 1.0.0. In the meantime we recommend to take a look at the provided sample plugin and the upgraded DSF plugins:
Work in progress
\\nWe are currently in the process of updating the written documentation on how to upgrade a DSF process plugin to DSF 1.0.0. In the meantime we recommend to take a look at the provided sample plugin and the upgraded DSF plugins:
\\nDo not upgrade unless prompted!
Please do not upgrade your DSF installation from DSF 0.9.x to DSF 1.x unless prompted to do so (e.g. from the German MII).
A direct upgrade from DSF 0.9.x to DSF 1.x is not supported. Instead, please perform the following steps:
/opt/bpe
and /opt/fhir
to /opt/bpe_0.9
and /opt/fhir_0.9
)New process plugins
Please do not copy your old process plugins into the new DSF. There will be a new release for each process plugin at the time you will be prompted to update your DSF instance.
Do not upgrade unless prompted!
\\nPlease do not upgrade your DSF installation from DSF 0.9.x to DSF 1.x unless prompted to do so (e.g. from the German MII).
\\nA direct upgrade from DSF 0.9.x to DSF 1.x is not supported. Instead, please perform the following steps:
"}');export{d as comp,c as data}; diff --git a/assets/upgrade-from-1.html-1Wof4LeJ.js b/assets/upgrade-from-1.html-1Wof4LeJ.js new file mode 100644 index 000000000..5e2a13341 --- /dev/null +++ b/assets/upgrade-from-1.html-1Wof4LeJ.js @@ -0,0 +1,18 @@ +import{_ as i}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as s,a,o as r}from"./app-BIWb5uIp.js";const n={};function l(t,e){return r(),s("div",null,e[0]||(e[0]=[a(`Upgrading the DSF from 1.5.2 to 1.6.0 involves modifying the docker-compose.yml files and recreating the containers.
Preparation / Backup
/opt/fhir
directory before proceeding with the upgrade.sudo cp -rp /opt/fhir /opt/fhir_backup_pre_1.6.0_upgrade
Modify the DSF FHIR docker-compose.yml file, replace the version number with 1.6.0.
version: '3.8'
+services:
+ proxy:
+- image: ghcr.io/datasharingframework/fhir_proxy:1.5.2
++ image: ghcr.io/datasharingframework/fhir_proxy:1.6.0
+ restart: on-failure
+...
+ app:
+- image: ghcr.io/datasharingframework/fhir:1.5.2
++ image: ghcr.io/datasharingframework/fhir:1.6.0
+ restart: on-failure
+...
/opt/fhir
executedocker compose up -d && docker compose logs -f
Preparation / Backup
/opt/bpe
directory before proceeding with the upgrade.sudo cp -rp /opt/bpe /opt/bpe_backup_pre_1.6.0_upgrade
Modify the DSF BPE docker-compose.yml file, replace the version number with 1.6.0.
version: '3.8'
+services:
+ app:
+- image: ghcr.io/datasharingframework/bpe:1.5.2
++ image: ghcr.io/datasharingframework/bpe:1.6.0
+ restart: on-failure
+...
Upgrade the DSF BPE containers
From /opt/bpe
execute
docker compose up -d && docker compose logs -f
Verify your upgrade:
INFO main - BuildInfoReaderImpl.logBuildInfo(137) | Artifact: dsf-fhir-server-jetty, version: 1.6.0, [...]
INFO main - BuildInfoReaderImpl.logBuildInfo(137) | Artifact: dsf-bpe-server-jetty, version: 1.6.0, [...]
Upgrading the DSF from 1.5.2 to 1.6.0 involves modifying the docker-compose.yml files and recreating the containers.
\\nPreparation / Backup
\\n/opt/fhir
directory before proceeding with the upgrade.sudo cp -rp /opt/fhir /opt/fhir_backup_pre_1.6.0_upgrade
Modify the DSF FHIR docker-compose.yml file, replace the version number with 1.6.0.
\\nUpgrading the DSF from 1.4.0 to 1.5.0 involves modifying the docker-compose.yml files and recreating the containers.
Update to DSF 1.2.0 first
When upgrading from 1.0.0 or 1.1.0 it is important to migrate to DSF 1.2.0 first.
Preparation / Backup
/opt/fhir
directory before proceeding with the upgrade.sudo cp -rp /opt/fhir /opt/fhir_backup_pre_1.5.0_upgrade
Modify the DSF FHIR docker-compose.yml file, replace the version number with 1.5.0.
version: '3.8'
+services:
+ proxy:
+- image: ghcr.io/datasharingframework/fhir_proxy:1.4.0
++ image: ghcr.io/datasharingframework/fhir_proxy:1.5.0
+ restart: on-failure
+...
+ app:
+- image: ghcr.io/datasharingframework/fhir:1.4.0
++ image: ghcr.io/datasharingframework/fhir:1.5.0
+ restart: on-failure
+...
/opt/fhir
executedocker compose up -d && docker compose logs -f
Preparation / Backup
/opt/bpe
directory before proceeding with the upgrade.sudo cp -rp /opt/bpe /opt/bpe_backup_pre_1.5.0_upgrade
Modify the DSF BPE docker-compose.yml file, replace the version number with 1.5.0.
version: '3.8'
+services:
+ app:
+- image: ghcr.io/datasharingframework/bpe:1.4.0
++ image: ghcr.io/datasharingframework/bpe:1.5.0
+ restart: on-failure
+...
Upgrade the DSF BPE containers
From /opt/bpe
execute
docker compose up -d && docker compose logs -f
Verify your upgrade:
INFO main - BuildInfoReaderImpl.logBuildInfo(137) | Artifact: dsf-fhir-server-jetty, version: 1.5.0, [...]
INFO main - BuildInfoReaderImpl.logBuildInfo(137) | Artifact: dsf-bpe-server-jetty, version: 1.5.0, [...]
Upgrading the DSF from 1.4.0 to 1.5.0 involves modifying the docker-compose.yml files and recreating the containers.
\\nUpdate to DSF 1.2.0 first
\\nWhen upgrading from 1.0.0 or 1.1.0 it is important to migrate to DSF 1.2.0 first.
\\nUpgrading the DSF from 1.3.2 to 1.4.0 involves modifying the docker-compose.yml files and recreating the containers.
Update to DSF 1.2.0 first
When upgrading from 1.0.0 or 1.1.0 it is important to migrate to DSF 1.2.0 first.
Preparation / Backup
/opt/fhir
directory before proceeding with the upgrade.sudo cp -rp /opt/fhir /opt/fhir_backup_pre_1.4.0_upgrade
Modify the DSF FHIR docker-compose.yml file, replace the version number with 1.4.0.
version: '3.8'
+services:
+ proxy:
+- image: ghcr.io/datasharingframework/fhir_proxy:1.3.2
++ image: ghcr.io/datasharingframework/fhir_proxy:1.4.0
+ restart: on-failure
+...
+ app:
+- image: ghcr.io/datasharingframework/fhir:1.3.2
++ image: ghcr.io/datasharingframework/fhir:1.4.0
+ restart: on-failure
+...
/opt/fhir
executedocker compose up -d && docker compose logs -f
Preparation / Backup
/opt/bpe
directory before proceeding with the upgrade.sudo cp -rp /opt/bpe /opt/bpe_backup_pre_1.4.0_upgrade
Modify the DSF BPE docker-compose.yml file, replace the version number with 1.4.0.
version: '3.8'
+services:
+ app:
+- image: ghcr.io/datasharingframework/bpe:1.3.2
++ image: ghcr.io/datasharingframework/bpe:1.4.0
+ restart: on-failure
+...
Upgrade the DSF BPE containers
From /opt/bpe
execute
docker compose up -d && docker compose logs -f
Verify your upgrade:
INFO main - BuildInfoReaderImpl.logBuildInfo(137) | Artifact: dsf-fhir-server-jetty, version: 1.4.0, [...]
INFO main - BuildInfoReaderImpl.logBuildInfo(137) | Artifact: dsf-bpe-server-jetty, version: 1.4.0, [...]
Upgrading the DSF from 1.3.2 to 1.4.0 involves modifying the docker-compose.yml files and recreating the containers.
\\nUpdate to DSF 1.2.0 first
\\nWhen upgrading from 1.0.0 or 1.1.0 it is important to migrate to DSF 1.2.0 first.
\\nUpgrading the DSF from 1.5.1 to 1.5.2 involves modifying the docker-compose.yml files and recreating the containers.
Update to DSF 1.2.0 first
When upgrading from 1.0.0 or 1.1.0 it is important to migrate to DSF 1.2.0 first.
Preparation / Backup
/opt/fhir
directory before proceeding with the upgrade.sudo cp -rp /opt/fhir /opt/fhir_backup_pre_1.5.2_upgrade
Modify the DSF FHIR docker-compose.yml file, replace the version number with 1.5.2.
version: '3.8'
+services:
+ proxy:
+- image: ghcr.io/datasharingframework/fhir_proxy:1.5.1
++ image: ghcr.io/datasharingframework/fhir_proxy:1.5.2
+ restart: on-failure
+...
+ app:
+- image: ghcr.io/datasharingframework/fhir:1.5.1
++ image: ghcr.io/datasharingframework/fhir:1.5.2
+ restart: on-failure
+...
/opt/fhir
executedocker compose up -d && docker compose logs -f
Preparation / Backup
/opt/bpe
directory before proceeding with the upgrade.sudo cp -rp /opt/bpe /opt/bpe_backup_pre_1.5.2_upgrade
Modify the DSF BPE docker-compose.yml file, replace the version number with 1.5.2.
version: '3.8'
+services:
+ app:
+- image: ghcr.io/datasharingframework/bpe:1.5.1
++ image: ghcr.io/datasharingframework/bpe:1.5.2
+ restart: on-failure
+...
Upgrade the DSF BPE containers
From /opt/bpe
execute
docker compose up -d && docker compose logs -f
Verify your upgrade:
INFO main - BuildInfoReaderImpl.logBuildInfo(137) | Artifact: dsf-fhir-server-jetty, version: 1.5.2, [...]
INFO main - BuildInfoReaderImpl.logBuildInfo(137) | Artifact: dsf-bpe-server-jetty, version: 1.5.2, [...]
Upgrading the DSF from 1.5.1 to 1.5.2 involves modifying the docker-compose.yml files and recreating the containers.
\\nUpdate to DSF 1.2.0 first
\\nWhen upgrading from 1.0.0 or 1.1.0 it is important to migrate to DSF 1.2.0 first.
\\nUpgrading the DSF from 1.6.0 to 1.7.0 involves modifying the docker-compose.yml files and recreating the containers. In addition a number of docker secrets and environment variables can be removed if the newly introduced default values are sufficient. The now integrated defaults for trusted certificate authorities include updated configuration for the new certificate authority HARICA used by DFN e.V. and GÉANT TCS. See the Default Root Certificates page for more details and to learn how to configure overrides.
Note: The configurations of trust stores for TLS connections managed by process plugins (for example HTTPS connections to the local KDS FHIR server) have not been modified with this release.
Preparation / Backup
/opt/fhir
directory before proceeding with the upgrade.sudo cp -rp /opt/fhir /opt/fhir_backup_pre_1.7.0_upgrade
Modify the DSF FHIR docker-compose.yml file, replace the version number with 1.7.0 and remove not needed secrets and environment variables.
version: '3.8'
+ services:
+ proxy:
+- image: ghcr.io/datasharingframework/fhir_proxy:1.6.0
++ image: ghcr.io/datasharingframework/fhir_proxy:1.7.0
+ restart: on-failure
+...
+ - ssl_certificate_file.pem
+ - ssl_certificate_key_file.pem
+ - ssl_certificate_chain_file.pem
+- - ssl_ca_certificate_file.pem
+- - ssl_ca_dn_request_file.pem
+ environment:
+...
+ SSL_CERTIFICATE_FILE: /run/secrets/ssl_certificate_file.pem
+ SSL_CERTIFICATE_KEY_FILE: /run/secrets/ssl_certificate_key_file.pem
+ SSL_CERTIFICATE_CHAIN_FILE: /run/secrets/ssl_certificate_chain_file.pem
+- SSL_CA_CERTIFICATE_FILE: /run/secrets/ssl_ca_certificate_file.pem
+- SSL_CA_DN_REQUEST_FILE: /run/secrets/ssl_ca_dn_request_file.pem
+ networks:
+...
+ app:
+- image: ghcr.io/datasharingframework/fhir:1.6.0
++ image: ghcr.io/datasharingframework/fhir:1.7.0
+ restart: on-failure
+...
+ secrets:
+ - db_liquibase.password
+ - db_user.password
+ - db_user_permanent_delete.password
+- - app_client_trust_certificates.pem
+ - app_client_certificate.pem
+ - app_client_certificate_private_key.pem
+ - app_client_certificate_private_key.pem.password
+- - ssl_ca_certificate_file.pem
+ volumes:
+...
+ environment:
+ TZ: Europe/Berlin
+- DEV_DSF_SERVER_AUTH_TRUST_CLIENT_CERTIFICATE_CAS: /run/secrets/ssl_ca_certificate_file.pem
+ DEV_DSF_FHIR_DB_LIQUIBASE_PASSWORD_FILE: /run/secrets/db_liquibase.password
+...
+ DEV_DSF_FHIR_DB_USER_PERMANENT_DELETE_PASSWORD_FILE: /run/secrets/db_user_permanent_delete.password
+- DEV_DSF_FHIR_CLIENT_TRUST_SERVER_CERTIFICATE_CAS: /run/secrets/app_client_trust_certificates.pem
+ DEV_DSF_FHIR_CLIENT_CERTIFICATE: /run/secrets/app_client_certificate.pem
+...
+ secrets:
+...
+- ssl_ca_certificate_file.pem:
+- file: ./secrets/ssl_ca_certificate_file.pem
+- ssl_ca_dn_request_file.pem:
+- file: ./secrets/ssl_ca_dn_request_file.pem
+...
+- app_client_trust_certificates.pem:
+- file: ./secrets/ssl_root_ca_certificate_file.pem
+...
/opt/fhir
executedocker compose up -d && docker compose logs -f
Preparation / Backup
/opt/bpe
directory before proceeding with the upgrade.sudo cp -rp /opt/bpe /opt/bpe_backup_pre_1.7.0_upgrade
Modify the DSF BPE docker-compose.yml file, replace the version number with 1.7.0 and remove not needed secrets and environment variables.
version: '3.8'
+ services:
+ app:
+- image: ghcr.io/datasharingframework/bpe:1.6.0
++ image: ghcr.io/datasharingframework/bpe:1.7.0
+ restart: on-failure
+ secrets:
+ - db_liquibase.password
+ - db_user.password
+ - db_user_camunda.password
+- - app_client_trust_certificates.pem
+ - app_client_certificate.pem
+ - app_client_certificate_private_key.pem
+ - app_client_certificate_private_key.pem.password
+- - ssl_ca_certificate_file.pem
+ volumes:
+...
+ environment:
+ TZ: Europe/Berlin
+- DEV_DSF_SERVER_AUTH_TRUST_CLIENT_CERTIFICATE_CAS: /run/secrets/ssl_ca_certificate_file.pem
+ DEV_DSF_BPE_DB_LIQUIBASE_PASSWORD_FILE: /run/secrets/db_liquibase.password
+...
+ DEV_DSF_BPE_DB_USER_CAMUNDA_PASSWORD_FILE: /run/secrets/db_user_camunda.password
+- DEV_DSF_BPE_FHIR_CLIENT_TRUST_SERVER_CERTIFICATE_CAS: /run/secrets/app_client_trust_certificates.pem
+ DEV_DSF_BPE_FHIR_CLIENT_CERTIFICATE: /run/secrets/app_client_certificate.pem
+...
+ secrets:
+...
+- app_client_trust_certificates.pem:
+- file: ./secrets/ssl_root_ca_certificate_file.pem
+...
+- ssl_ca_certificate_file.pem:
+- file: ./secrets/ssl_ca_certificate_file.pem
+...
Upgrade the DSF BPE containers
From /opt/bpe
execute
docker compose up -d && docker compose logs -f
Verify your upgrade:
INFO main - BuildInfoReaderImpl.logBuildInfo(137) | Artifact: dsf-fhir-server-jetty, version: 1.7.0, [...]
INFO main - BuildInfoReaderImpl.logBuildInfo(137) | Artifact: dsf-bpe-server-jetty, version: 1.7.0, [...]
Upgrading the DSF from 1.6.0 to 1.7.0 involves modifying the docker-compose.yml files and recreating the containers. In addition a number of docker secrets and environment variables can be removed if the newly introduced default values are sufficient. The now integrated defaults for trusted certificate authorities include updated configuration for the new certificate authority HARICA used by DFN e.V. and GÉANT TCS. See the Default Root Certificates page for more details and to learn how to configure overrides.
"}');export{c as comp,h as data}; diff --git a/assets/upgrade-from-1.html-CWtjTQLA.js b/assets/upgrade-from-1.html-CWtjTQLA.js new file mode 100644 index 000000000..307764092 --- /dev/null +++ b/assets/upgrade-from-1.html-CWtjTQLA.js @@ -0,0 +1,18 @@ +import{_ as e}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as s,a,o as r}from"./app-BIWb5uIp.js";const n={};function t(l,i){return r(),s("div",null,i[0]||(i[0]=[a(`Upgrading the DSF from 1.3.1 to 1.3.2 involves modifying the docker-compose.yml files and recreating the containers.
Update to DSF 1.2.0 first
When upgrading from 1.0.0 or 1.1.0 it is important to migrate to DSF 1.2.0 first.
Preparation / Backup
/opt/fhir
directory before proceeding with the upgrade.sudo cp -rp /opt/fhir /opt/fhir_backup_pre_1.3.2_upgrade
Modify the DSF FHIR docker-compose.yml file, replace the version number with 1.3.2.
version: '3.8'
+services:
+ proxy:
+- image: ghcr.io/datasharingframework/fhir_proxy:1.3.1
++ image: ghcr.io/datasharingframework/fhir_proxy:1.3.2
+ restart: on-failure
+...
+ app:
+- image: ghcr.io/datasharingframework/fhir:1.3.1
++ image: ghcr.io/datasharingframework/fhir:1.3.2
+ restart: on-failure
+...
/opt/fhir
executedocker compose up -d && docker compose logs -f
Preparation / Backup
/opt/bpe
directory before proceeding with the upgrade.sudo cp -rp /opt/bpe /opt/bpe_backup_pre_1.3.2_upgrade
Modify the DSF BPE docker-compose.yml file, replace the version number with 1.3.2.
version: '3.8'
+services:
+ app:
+- image: ghcr.io/datasharingframework/bpe:1.3.1
++ image: ghcr.io/datasharingframework/bpe:1.3.2
+ restart: on-failure
+...
Upgrade the DSF BPE containers
From /opt/bpe
execute
docker compose up -d && docker compose logs -f
Verify your upgrade:
INFO main - BuildInfoReaderImpl.logBuildInfo(137) | Artifact: dsf-fhir-server-jetty, version: 1.3.2, [...]
INFO main - BuildInfoReaderImpl.logBuildInfo(137) | Artifact: dsf-bpe-server-jetty, version: 1.3.2, [...]
Upgrading the DSF from 1.3.1 to 1.3.2 involves modifying the docker-compose.yml files and recreating the containers.
\\nUpdate to DSF 1.2.0 first
\\nWhen upgrading from 1.0.0 or 1.1.0 it is important to migrate to DSF 1.2.0 first.
\\nUpgrading the DSF from 1.5.0 to 1.5.1 involves modifying the docker-compose.yml files and recreating the containers.
Update to DSF 1.2.0 first
When upgrading from 1.0.0 or 1.1.0 it is important to migrate to DSF 1.2.0 first.
Preparation / Backup
/opt/fhir
directory before proceeding with the upgrade.sudo cp -rp /opt/fhir /opt/fhir_backup_pre_1.5.1_upgrade
Modify the DSF FHIR docker-compose.yml file, replace the version number with 1.5.1.
version: '3.8'
+services:
+ proxy:
+- image: ghcr.io/datasharingframework/fhir_proxy:1.5.0
++ image: ghcr.io/datasharingframework/fhir_proxy:1.5.1
+ restart: on-failure
+...
+ app:
+- image: ghcr.io/datasharingframework/fhir:1.5.0
++ image: ghcr.io/datasharingframework/fhir:1.5.1
+ restart: on-failure
+...
/opt/fhir
executedocker compose up -d && docker compose logs -f
Preparation / Backup
/opt/bpe
directory before proceeding with the upgrade.sudo cp -rp /opt/bpe /opt/bpe_backup_pre_1.5.1_upgrade
Modify the DSF BPE docker-compose.yml file, replace the version number with 1.5.1.
version: '3.8'
+services:
+ app:
+- image: ghcr.io/datasharingframework/bpe:1.5.0
++ image: ghcr.io/datasharingframework/bpe:1.5.1
+ restart: on-failure
+...
Upgrade the DSF BPE containers
From /opt/bpe
execute
docker compose up -d && docker compose logs -f
Verify your upgrade:
INFO main - BuildInfoReaderImpl.logBuildInfo(137) | Artifact: dsf-fhir-server-jetty, version: 1.5.1, [...]
INFO main - BuildInfoReaderImpl.logBuildInfo(137) | Artifact: dsf-bpe-server-jetty, version: 1.5.1, [...]
Upgrading the DSF from 1.5.0 to 1.5.1 involves modifying the docker-compose.yml files and recreating the containers.
\\nUpdate to DSF 1.2.0 first
\\nWhen upgrading from 1.0.0 or 1.1.0 it is important to migrate to DSF 1.2.0 first.
\\nUpgrading the DSF from 1.2.0 to 1.3.0 involves modifying the docker-compose.yml files and recreating the containers.
Upgrade from 0.9.x
If you want to migrate from DSF 0.9.x, please follow these instructions.
Update to DSF 1.2.0 first
When upgrading from 1.0.0 or 1.1.0 it is important to migrate to DSF 1.2.0 first.
Preparation / Backup
/opt/fhir
directory before proceeding with the upgrade.sudo cp -rp /opt/fhir /opt/fhir_backup_pre_1.3.0_upgrade
Modify the DSF FHIR docker-compose.yml file, replace the version number with 1.3.0.
version: '3.8'
+services:
+ proxy:
+- image: ghcr.io/datasharingframework/fhir_proxy:1.2.0
++ image: ghcr.io/datasharingframework/fhir_proxy:1.3.0
+ restart: on-failure
+...
+ app:
+- image: ghcr.io/datasharingframework/fhir:1.2.0
++ image: ghcr.io/datasharingframework/fhir:1.3.0
+ restart: on-failure
+...
/opt/fhir
executedocker compose up -d && docker compose logs -f
Preparation / Backup
/opt/bpe
directory before proceeding with the upgrade.sudo cp -rp /opt/bpe /opt/bpe_backup_pre_1.3.0_upgrade
Modify the DSF BPE docker-compose.yml file, replace the version number with 1.3.0.
version: '3.8'
+services:
+ app:
+- image: ghcr.io/datasharingframework/bpe:1.2.0
++ image: ghcr.io/datasharingframework/bpe:1.3.0
+ restart: on-failure
+...
Upgrade the DSF BPE containers
From /opt/bpe
execute
docker compose up -d && docker compose logs -f
Verify your upgrade:
INFO main - BuildInfoReaderImpl.logBuildInfo(137) | Artifact: dsf-fhir-server-jetty, version: 1.3.0, [...]
INFO main - BuildInfoReaderImpl.logBuildInfo(137) | Artifact: dsf-bpe-server-jetty, version: 1.3.0, [...]
Upgrading the DSF from 1.2.0 to 1.3.0 involves modifying the docker-compose.yml files and recreating the containers.
\\nUpgrade from 0.9.x
\\nIf you want to migrate from DSF 0.9.x, please follow these instructions.
\\nUpgrading the DSF from 1.0.0 to 1.1.0 involves modifying the docker-compose.yml files and recreating the containers.
Upgrade from 0.9.x
If you want to migrate from DSF 0.9.x, please follow these instructions.
Preparation / Backup
/opt/fhir
directory before proceeding with the upgrade.sudo cp -rp /opt/fhir /opt/fhir_backup_pre_1.1.0_upgrade
Modify the DSF FHIR docker-compose.yml file, replace the version number with 1.1.0
version: '3.8'
+services:
+ proxy:
+- image: ghcr.io/datasharingframework/fhir_proxy:1.0.0
++ image: ghcr.io/datasharingframework/fhir_proxy:1.1.0
+ restart: on-failure
+...
+ app:
+- image: ghcr.io/datasharingframework/fhir:1.0.0
++ image: ghcr.io/datasharingframework/fhir:1.1.0
+ restart: on-failure
+...
/opt/fhir
executedocker compose up -d && docker compose logs -f
Preparation / Backup
/opt/bpe
directory before proceeding with the upgrade.sudo cp -rp /opt/bpe /opt/bpe_backup_pre_1.1.0_upgrade
Modify the DSF BPE docker-compose.yml file, replace the version number with 1.1.0
version: '3.8'
+services:
+ app:
+- image: ghcr.io/datasharingframework/bpe:1.0.0
++ image: ghcr.io/datasharingframework/bpe:1.1.0
+ restart: on-failure
+...
Upgrade the DSF BPE containers
From /opt/bpe
execute
docker compose up -d && docker compose logs -f
Verify your upgrade:
INFO main - BuildInfoReaderImpl.logBuildInfo(137) | Artifact: dsf-fhir-server-jetty, version: 1.1.0, [...]
INFO main - BuildInfoReaderImpl.logBuildInfo(137) | Artifact: dsf-bpe-server-jetty, version: 1.1.0, [...]
Upgrading the DSF from 1.0.0 to 1.1.0 involves modifying the docker-compose.yml files and recreating the containers.
\\nUpgrade from 0.9.x
\\nIf you want to migrate from DSF 0.9.x, please follow these instructions.
\\nUpgrading the DSF from 1.3.0 to 1.3.1 involves modifying the docker-compose.yml files and recreating the containers.
Update to DSF 1.2.0 first
When upgrading from 1.0.0 or 1.1.0 it is important to migrate to DSF 1.2.0 first.
Preparation / Backup
/opt/fhir
directory before proceeding with the upgrade.sudo cp -rp /opt/fhir /opt/fhir_backup_pre_1.3.1_upgrade
Modify the DSF FHIR docker-compose.yml file, replace the version number with 1.3.1.
version: '3.8'
+services:
+ proxy:
+- image: ghcr.io/datasharingframework/fhir_proxy:1.3.0
++ image: ghcr.io/datasharingframework/fhir_proxy:1.3.1
+ restart: on-failure
+...
+ app:
+- image: ghcr.io/datasharingframework/fhir:1.3.0
++ image: ghcr.io/datasharingframework/fhir:1.3.1
+ restart: on-failure
+...
/opt/fhir
executedocker compose up -d && docker compose logs -f
Preparation / Backup
/opt/bpe
directory before proceeding with the upgrade.sudo cp -rp /opt/bpe /opt/bpe_backup_pre_1.3.1_upgrade
Modify the DSF BPE docker-compose.yml file, replace the version number with 1.3.1.
version: '3.8'
+services:
+ app:
+- image: ghcr.io/datasharingframework/bpe:1.3.0
++ image: ghcr.io/datasharingframework/bpe:1.3.1
+ restart: on-failure
+...
Upgrade the DSF BPE containers
From /opt/bpe
execute
docker compose up -d && docker compose logs -f
Verify your upgrade:
INFO main - BuildInfoReaderImpl.logBuildInfo(137) | Artifact: dsf-fhir-server-jetty, version: 1.3.1, [...]
INFO main - BuildInfoReaderImpl.logBuildInfo(137) | Artifact: dsf-bpe-server-jetty, version: 1.3.1, [...]
Upgrading the DSF from 1.3.0 to 1.3.1 involves modifying the docker-compose.yml files and recreating the containers.
\\nUpdate to DSF 1.2.0 first
\\nWhen upgrading from 1.0.0 or 1.1.0 it is important to migrate to DSF 1.2.0 first.
\\nUpgrading the DSF from 1.1.0 to 1.2.0 involves modifying the docker-compose.yml files and recreating the containers.
Upgrade from 0.9.x
If you want to migrate from DSF 0.9.x, please follow these instructions.
Preparation / Backup
/opt/fhir
directory before proceeding with the upgrade.sudo cp -rp /opt/fhir /opt/fhir_backup_pre_1.2.0_upgrade
Modify the DSF FHIR docker-compose.yml file, replace the version number with 1.2.0 and remove the old healthcheck definition. The new healthcheck is defined as part of the docker image.
version: '3.8'
+services:
+ proxy:
+- image: ghcr.io/datasharingframework/fhir_proxy:1.1.0
++ image: ghcr.io/datasharingframework/fhir_proxy:1.2.0
+ restart: on-failure
+...
+ app:
+- image: ghcr.io/datasharingframework/fhir:1.1.0
++ image: ghcr.io/datasharingframework/fhir:1.2.0
+ restart: on-failure
+- healthcheck:
+- test: ["CMD", "java", "-cp", "dsf_fhir.jar", "dev.dsf.common.status.client.StatusClient"]
+- interval: 10s
+- timeout: 15s
+- retries: 5
+...
app:
+...
+ environment:
+...
+- # TODO specify role configuration to allow access to the UI via web-browser or REST API for specific users, see documentation at dsf.dev
+- DEV_DSF_FHIR_SERVER_ROLECONFIG: |
/opt/fhir
executedocker compose up -d && docker compose logs -f
Preparation / Backup
/opt/bpe
directory before proceeding with the upgrade.sudo cp -rp /opt/bpe /opt/bpe_backup_pre_1.2.0_upgrade
Modify the DSF BPE docker-compose.yml file, replace the version number with 1.2.0 and remove the old healthcheck definition. The new healthcheck is defined as part of the docker image.
version: '3.8'
+services:
+ app:
+- image: ghcr.io/datasharingframework/bpe:1.1.0
++ image: ghcr.io/datasharingframework/bpe:1.2.0
+ restart: on-failure
+- healthcheck:
+- test: ["CMD", "java", "-cp", "dsf_bpe.jar", "dev.dsf.common.status.client.StatusClient"]
+- interval: 10s
+- timeout: 15s
+- retries: 5
+...
DSF v1.2.0 is not compatible with the Ping/Pong process plugin v1.0.0.0, upgrade to the Ping/Pong plugin v1.0.1.0 by removing the old jar file and replacing it with the new v1.0.1.0 one.
We have released a new version of the Allow List Process plugin where we added support for delete operations. Please upgrade to the Allow-List process v1.0.0.1 by removing the old jar file and replacing it with the new v1.0.0.1 one.
Upgrade the DSF BPE containers
From /opt/bpe
execute
docker compose up -d && docker compose logs -f
Verify your upgrade:
INFO main - BuildInfoReaderImpl.logBuildInfo(137) | Artifact: dsf-fhir-server-jetty, version: 1.2.0, [...]
INFO main - BuildInfoReaderImpl.logBuildInfo(137) | Artifact: dsf-bpe-server-jetty, version: 1.2.0, [...]
Upgrading the DSF from 1.1.0 to 1.2.0 involves modifying the docker-compose.yml files and recreating the containers.
\\nUpgrade from 0.9.x
\\nIf you want to migrate from DSF 0.9.x, please follow these instructions.
\\nUpgrading the DSF from 1.6.0 to 1.7.0 involves modifying the docker-compose.yml files and recreating the containers. In addition a number of docker secrets and environment variables can be removed if the newly introduced default values are sufficient. The now integrated defaults for trusted certificate authorities include updated configuration for the new certificate authority HARICA used by DFN e.V. and GÉANT TCS. See the Default Root Certificates page for more details and to learn how to configure overrides.
Note: The configurations of trust stores for TLS connections managed by process plugins (for example HTTPS connections to the local KDS FHIR server) have not been modified with this release.
Preparation / Backup
/opt/fhir
directory before proceeding with the upgrade.sudo cp -rp /opt/fhir /opt/fhir_backup_pre_1.7.0_upgrade
Modify the DSF FHIR docker-compose.yml file, replace the version number with 1.7.0 and remove not needed secrets and environment variables.
version: '3.8'
+ services:
+ proxy:
+- image: ghcr.io/datasharingframework/fhir_proxy:1.6.0
++ image: ghcr.io/datasharingframework/fhir_proxy:1.7.0
+ restart: on-failure
+...
+ - ssl_certificate_file.pem
+ - ssl_certificate_key_file.pem
+ - ssl_certificate_chain_file.pem
+- - ssl_ca_certificate_file.pem
+- - ssl_ca_dn_request_file.pem
+ environment:
+...
+ SSL_CERTIFICATE_FILE: /run/secrets/ssl_certificate_file.pem
+ SSL_CERTIFICATE_KEY_FILE: /run/secrets/ssl_certificate_key_file.pem
+ SSL_CERTIFICATE_CHAIN_FILE: /run/secrets/ssl_certificate_chain_file.pem
+- SSL_CA_CERTIFICATE_FILE: /run/secrets/ssl_ca_certificate_file.pem
+- SSL_CA_DN_REQUEST_FILE: /run/secrets/ssl_ca_dn_request_file.pem
+ networks:
+...
+ app:
+- image: ghcr.io/datasharingframework/fhir:1.6.0
++ image: ghcr.io/datasharingframework/fhir:1.7.0
+ restart: on-failure
+...
+ secrets:
+ - db_liquibase.password
+ - db_user.password
+ - db_user_permanent_delete.password
+- - app_client_trust_certificates.pem
+ - app_client_certificate.pem
+ - app_client_certificate_private_key.pem
+ - app_client_certificate_private_key.pem.password
+- - ssl_ca_certificate_file.pem
+ volumes:
+...
+ environment:
+ TZ: Europe/Berlin
+- DEV_DSF_SERVER_AUTH_TRUST_CLIENT_CERTIFICATE_CAS: /run/secrets/ssl_ca_certificate_file.pem
+ DEV_DSF_FHIR_DB_LIQUIBASE_PASSWORD_FILE: /run/secrets/db_liquibase.password
+...
+ DEV_DSF_FHIR_DB_USER_PERMANENT_DELETE_PASSWORD_FILE: /run/secrets/db_user_permanent_delete.password
+- DEV_DSF_FHIR_CLIENT_TRUST_SERVER_CERTIFICATE_CAS: /run/secrets/app_client_trust_certificates.pem
+ DEV_DSF_FHIR_CLIENT_CERTIFICATE: /run/secrets/app_client_certificate.pem
+...
+ secrets:
+...
+- ssl_ca_certificate_file.pem:
+- file: ./secrets/ssl_ca_certificate_file.pem
+- ssl_ca_dn_request_file.pem:
+- file: ./secrets/ssl_ca_dn_request_file.pem
+...
+- app_client_trust_certificates.pem:
+- file: ./secrets/ssl_root_ca_certificate_file.pem
+...
/opt/fhir
executedocker compose up -d && docker compose logs -f
Preparation / Backup
/opt/bpe
directory before proceeding with the upgrade.sudo cp -rp /opt/bpe /opt/bpe_backup_pre_1.7.0_upgrade
Modify the DSF BPE docker-compose.yml file, replace the version number with 1.7.0 and remove not needed secrets and environment variables.
version: '3.8'
+ services:
+ app:
+- image: ghcr.io/datasharingframework/bpe:1.6.0
++ image: ghcr.io/datasharingframework/bpe:1.7.0
+ restart: on-failure
+ secrets:
+ - db_liquibase.password
+ - db_user.password
+ - db_user_camunda.password
+- - app_client_trust_certificates.pem
+ - app_client_certificate.pem
+ - app_client_certificate_private_key.pem
+ - app_client_certificate_private_key.pem.password
+- - ssl_ca_certificate_file.pem
+ volumes:
+...
+ environment:
+ TZ: Europe/Berlin
+- DEV_DSF_SERVER_AUTH_TRUST_CLIENT_CERTIFICATE_CAS: /run/secrets/ssl_ca_certificate_file.pem
+ DEV_DSF_BPE_DB_LIQUIBASE_PASSWORD_FILE: /run/secrets/db_liquibase.password
+...
+ DEV_DSF_BPE_DB_USER_CAMUNDA_PASSWORD_FILE: /run/secrets/db_user_camunda.password
+- DEV_DSF_BPE_FHIR_CLIENT_TRUST_SERVER_CERTIFICATE_CAS: /run/secrets/app_client_trust_certificates.pem
+ DEV_DSF_BPE_FHIR_CLIENT_CERTIFICATE: /run/secrets/app_client_certificate.pem
+...
+ secrets:
+...
+- app_client_trust_certificates.pem:
+- file: ./secrets/ssl_root_ca_certificate_file.pem
+...
+- ssl_ca_certificate_file.pem:
+- file: ./secrets/ssl_ca_certificate_file.pem
+...
Upgrade the DSF BPE containers
From /opt/bpe
execute
docker compose up -d && docker compose logs -f
Verify your upgrade:
INFO main - BuildInfoReaderImpl.logBuildInfo(137) | Artifact: dsf-fhir-server-jetty, version: 1.7.0, [...]
INFO main - BuildInfoReaderImpl.logBuildInfo(137) | Artifact: dsf-bpe-server-jetty, version: 1.7.0, [...]
Upgrading the DSF from 1.6.0 to 1.7.0 involves modifying the docker-compose.yml files and recreating the containers. In addition a number of docker secrets and environment variables can be removed if the newly introduced default values are sufficient. The now integrated defaults for trusted certificate authorities include updated configuration for the new certificate authority HARICA used by DFN e.V. and GÉANT TCS. See the Default Root Certificates page for more details and to learn how to configure overrides.
"}');export{c as comp,h as data}; diff --git a/assets/upgradeFrom7.html-B6PaZf4F.js b/assets/upgradeFrom7.html-B6PaZf4F.js new file mode 100644 index 000000000..8a2eb8486 --- /dev/null +++ b/assets/upgradeFrom7.html-B6PaZf4F.js @@ -0,0 +1,90 @@ +import{_ as s}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as i,a,o as n}from"./app-BIWb5uIp.js";const r={};function l(t,e){return n(),i("div",null,e[0]||(e[0]=[a(`Upgrading the DSF from 0.7.0 to 0.9.0 involves replacing a config file, modifying the docker-compose.yml files, replacing the process plugins and recreating the containers.
If you are upgrading from 0.6.0 please see the Upgrade from 0.6.0 to 0.7.0 guide first.
Preparation / Backup
/opt/fhir
directory before proceeding with the upgrade.sudo cp -rp /opt/fhir /opt/fhir_backup_pre_0.9.0_upgrade
Modify the DSF FHIR docker-compose.yml file, replace 0.7.0 with 0.9.0
@@ -1,7 +1,7 @@
+ version: '3.8'
+ services:
+ proxy:
+- image: ghcr.io/highmed/fhir_proxy:0.7.0
++ image: ghcr.io/highmed/fhir_proxy:0.9.0
+ restart: on-failure
+ ports:
+ - 127.0.0.1:80:80
+@@ -27,7 +27,7 @@ services:
+ - app
+
+ app:
+- image: ghcr.io/highmed/fhir:0.7.0
++ image: ghcr.io/highmed/fhir:0.9.0
+ restart: on-failure
+ healthcheck:
+ test: ["CMD", "java", "-cp", "dsf_fhir.jar", "org.highmed.dsf.fhir.StatusClient"]
Download prepared DSF FHIR server config files and extract/replace the external FHIR bundle
cd /opt
+wget https://github.com/highmed/highmed-dsf/wiki/resources/dsf_codex_test_fhir_0_9_0.tar.gz
+sudo tar --same-owner -zxvf dsf_codex_test_fhir_0_9_0.tar.gz fhir/conf/bundle.xml
cd /opt
+wget https://github.com/highmed/highmed-dsf/wiki/resources/dsf_highmed_test_fhir_0_9_0.tar.gz
+sudo tar --same-owner -zxvf dsf_highmed_test_fhir_0_9_0.tar.gz fhir/conf/bundle.xml
cd /opt
+wget https://github.com/highmed/highmed-dsf/wiki/resources/dsf_codex_prod_fhir_0_9_0.tar.gz
+sudo tar --same-owner -zxvf dsf_codex_prod_fhir_0_9_0.tar.gz fhir/conf/bundle.xml
cd /opt
+wget https://github.com/highmed/highmed-dsf/wiki/resources/dsf_highmed_prod_fhir_0_9_0.tar.gz
+sudo tar --same-owner -zxvf dsf_highmed_prod_fhir_0_9_0.tar.gz fhir/conf/bundle.xml
The command will update the external FHIR bundle at /opt/fhir/conf/bundle.xml
The output of the tar command should be
fhir/conf/bundle.xml
Upgrade the DSF FHIR containers
From /opt/fhir
execute
docker-compose up -d && docker-compose logs -f
Preparation / Backup
/opt/bpe
directory before proceeding with the upgrade.sudo cp -rp /opt/bpe /opt/bpe_backup_pre_0.9.0_upgrade
Modify the DSF BPE docker-compose.yml file
NUM-CODEX (non HiGHmed) instance:
Change the bpe container version from 0.7.0 to 0.9.0, update the process exclude config and remove a bind mount
@@ -1,7 +1,7 @@
+ version: '3.8'
+ services:
+ app:
+- image: ghcr.io/highmed/bpe:0.7.0
++ image: ghcr.io/highmed/bpe:0.9.0
+ restart: on-failure
+ healthcheck:
+ test: ["CMD", "java", "-cp", "dsf_bpe.jar", "org.highmed.dsf.bpe.StatusClient"]
+@@ -29,9 +29,6 @@ services:
+ - type: bind
+ source: ./log
+ target: /opt/bpe/log
+- - type: bind
+- source: ./last_event
+- target: /opt/bpe/last_event
+ - type: bind
+ source: ./cache
+ target: /opt/bpe/cache
+@@ -48,8 +45,8 @@ services:
+ ORG_HIGHMED_DSF_BPE_FHIR_SERVER_ORGANIZATION_IDENTIFIER_VALUE: TODO_ORGANIZATION_IDENTIFIER
+ ORG_HIGHMED_DSF_BPE_FHIR_SERVER_BASE_URL: https://TODO_DSF_FRIR_SERVER_FQDN/fhir
+ ORG_HIGHMED_DSF_BPE_PROCESS_EXCLUDED: |
+- wwwnetzwerk-universitaetsmedizinde_dataTranslate/0.6.0
+- wwwnetzwerk-universitaetsmedizinde_dataReceive/0.6.0
++ wwwnetzwerk-universitaetsmedizinde_dataTranslate/0.7.0
++ wwwnetzwerk-universitaetsmedizinde_dataReceive/0.7.0
+
+ #TODO modify ORG_HIGHMED_DSF_BPE_PROCESS_EXCLUDED for later process versions
+ #TODO add process specific environment variables, see process documentation
HiGHmed instance:
Change the bpe container version from 0.7.0 to 0.9.0, update the process exclude config, and remove a bind mount
@@ -1,7 +1,7 @@
+ version: '3.8'
+ services:
+ app:
+- image: ghcr.io/highmed/bpe:0.7.0
++ image: ghcr.io/highmed/bpe:0.9.0
+ restart: on-failure
+ healthcheck:
+ test: ["CMD", "java", "-cp", "dsf_bpe.jar", "org.highmed.dsf.bpe.StatusClient"]
+@@ -29,9 +29,6 @@ services:
+ - type: bind
+ source: ./log
+ target: /opt/bpe/log
+- - type: bind
+- source: ./last_event
+- target: /opt/bpe/last_event
+ - type: bind
+ source: ./psn
+ target: /opt/bpe/psn
+@@ -51,11 +48,11 @@ services:
+ ORG_HIGHMED_DSF_BPE_FHIR_SERVER_ORGANIZATION_IDENTIFIER_VALUE: TODO_ORGANIZATION_IDENTIFIER
+ ORG_HIGHMED_DSF_BPE_FHIR_SERVER_BASE_URL: https://TODO_DSF_FRIR_SERVER_FQDN/fhir
+ ORG_HIGHMED_DSF_BPE_PROCESS_EXCLUDED: |
+- highmedorg_computeFeasibility/0.6.0
+- highmedorg_computeDataSharing/0.6.0
+- highmedorg_updateAllowList/0.6.0
+- wwwnetzwerk-universitaetsmedizinde_dataTranslate/0.6.0
+- wwwnetzwerk-universitaetsmedizinde_dataReceive/0.6.0
++ highmedorg_computeFeasibility/0.7.0
++ highmedorg_computeDataSharing/0.7.0
++ highmedorg_updateAllowList/0.7.0
++ wwwnetzwerk-universitaetsmedizinde_dataTranslate/0.7.0
++ wwwnetzwerk-universitaetsmedizinde_dataReceive/0.7.0
+
+ #TODO modify ORG_HIGHMED_DSF_BPE_PROCESS_EXCLUDED for later process versions
+ #TODO add process specific environment variables, see process documentation
Upgrade DSF Plugins and Process-Plugins
Process plugins in /opt/bpe/process
:
codex-process-data-transfer-0.6.0.jar
with codex-process-data-transfer-0.7.0.jar
from the latest NUM-CODEX processes releasedsf-bpe-process-ping-0.6.0.jar
with dsf-bpe-process-ping-0.7.0.jar
from the latest HiGHmed processes releaseMake sure the process plugins in /opt/bpe/process
are configured with chmod 440
and chown root:bpe
.
Process plugins in /opt/bpe/process
:
codex-process-data-transfer-0.6.0.jar
with codex-process-data-transfer-0.7.0.jar
from the latest NUM-CODEX processes releasedsf-bpe-process-data-sharing-0.6.0.jar
with dsf-bpe-process-data-sharing-0.7.0.jar
from the latest HiGHmed processes releasedsf-bpe-process-feasibility-0.6.0.jar
with dsf-bpe-process-feasibility-0.7.0.jar
from the latest HiGHmed processes releasedsf-bpe-process-feasibility-mpc-0.6.0.jar
with dsf-bpe-process-feasibility-mpc-0.7.0.jar
from the latest HiGHmed processes releasedsf-bpe-process-local-services-0.6.0.jar
with dsf-bpe-process-local-services-0.7.0.jar
from the latest HiGHmed processes releasedsf-bpe-process-ping-0.6.0.jar
with dsf-bpe-process-ping-0.7.0.jar
from the latest HiGHmed processes releasedsf-bpe-process-update-allow-list-0.6.0.jar
with dsf-bpe-process-update-allow-list-0.7.0.jar
from the latest HiGHmed processes releaseMake sure the process plugins in /opt/bpe/process
are configured with chmod 440
and chown root:bpe
.
Plugins in '/opt/bpe/plugin':
dsf-mpi-client-pdq-0.7.0.jar
, hapi-base-2.3.jar
and hapi-structures-v25-2.3.jar
with the new files from dsf-mpi-client-pdq-0.9.0.zip
, see DSF release notesdsf-openehr-client-impl-0.7.0.jar
with the new file from dsf-openehr-client-impl-0.9.0.zip
, see DSF release notesMake sure the plugins in /opt/bpe/plugin
are configured with chmod 440
and chown root:bpe
.
Upgrade the DSF BPE containers
From /opt/bpe
execute
docker-compose up -d && docker-compose logs -f
Verify your upgrade:
INFO main - BuildInfoReaderImpl.logBuildInfo(137) | Artifact: dsf-fhir-server-jetty, version: 0.9.0, [...]
INFO main - BuildInfoReaderImpl.logBuildInfo(137) | Artifact: dsf-bpe-server-jetty, version: 0.9.0, [...]
Upgrading the DSF from 0.7.0 to 0.9.0 involves replacing a config file, modifying the docker-compose.yml files, replacing the process plugins and recreating the containers.
\\nIf you are upgrading from 0.6.0 please see the Upgrade from 0.6.0 to 0.7.0 guide first.
"}');export{h as comp,o as data}; diff --git a/assets/upgradeFrom8.html-BiXdmn2Q.js b/assets/upgradeFrom8.html-BiXdmn2Q.js new file mode 100644 index 000000000..3a611f6b3 --- /dev/null +++ b/assets/upgradeFrom8.html-BiXdmn2Q.js @@ -0,0 +1,46 @@ +import{_ as r}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as n,b as s,d as i,e as l,f as t,a as o,r as d,o as p}from"./app-BIWb5uIp.js";const h={};function c(g,e){const a=d("RouteLink");return p(),n("div",null,[e[5]||(e[5]=s("p",null,"Upgrading the DSF from 0.8.0 to 0.9.0 involves modifying the docker-compose.yml files, replacing the process plugins and recreating the containers.",-1)),s("p",null,[e[3]||(e[3]=s("strong",null,[i("If you are upgrading from 0.6.0 please see the "),s("a",{href:"https://github.com/highmed/highmed-dsf/wiki/DSF-Upgrade-From-0.6.0-to-0.7.0",target:"_blank",rel:"noopener noreferrer"},"Upgrade from 0.6.0 to 0.7.0 guide"),i(" first and then visit the "),s("a",{href:"https://github.com/highmed/highmed-dsf/wiki/DSF-Upgrade-From-0.7.0-to-0.9.0",target:"_blank",rel:"noopener noreferrer"},"Upgrade from 0.7.0 to 0.9.0 guide"),i(".")],-1)),e[4]||(e[4]=s("br",null,null,-1)),s("strong",null,[e[1]||(e[1]=i("If you are upgrading from 0.7.0 please see the ")),l(a,{to:"/oldstable/releases/upgradeFrom7.html"},{default:t(()=>e[0]||(e[0]=[i("Upgrade from 0.7.0 to 0.9.0 guide")])),_:1}),e[2]||(e[2]=i("."))])]),e[6]||(e[6]=o(`Preparation / Backup
/opt/fhir
directory before proceeding with the upgrade.sudo cp -rp /opt/fhir /opt/fhir_backup_pre_0.9.0_upgrade
Modify the DSF FHIR docker-compose.yml file, replace 0.8.0 with 0.9.0
@@ -1,7 +1,7 @@
+ version: '3.8'
+ services:
+ proxy:
+- image: ghcr.io/highmed/fhir_proxy:0.8.0
++ image: ghcr.io/highmed/fhir_proxy:0.9.0
+ restart: on-failure
+ ports:
+ - 127.0.0.1:80:80
+@@ -27,7 +27,7 @@ services:
+ - app
+
+ app:
+- image: ghcr.io/highmed/fhir:0.8.0
++ image: ghcr.io/highmed/fhir:0.9.0
+ restart: on-failure
+ healthcheck:
+ test: ["CMD", "java", "-cp", "dsf_fhir.jar", "org.highmed.dsf.fhir.StatusClient"]
Upgrade the DSF FHIR containers
From /opt/fhir
execute
docker-compose up -d && docker-compose logs -f
Preparation / Backup
/opt/bpe
directory before proceeding with the upgrade.sudo cp -rp /opt/bpe /opt/bpe_backup_pre_0.9.0_upgrade
Modify the DSF BPE docker-compose.yml file
NUM-CODEX (non HiGHmed) instance:
Change the bpe container version from 0.8.0 to 0.9.0, update the process exclude config and remove a bind mount
@@ -1,7 +1,7 @@
+ version: '3.8'
+ services:
+ app:
+- image: ghcr.io/highmed/bpe:0.8.0
++ image: ghcr.io/highmed/bpe:0.9.0
+ restart: on-failure
+ healthcheck:
+ test: ["CMD", "java", "-cp", "dsf_bpe.jar", "org.highmed.dsf.bpe.StatusClient"]
HiGHmed instance:
Change the bpe container version from 0.7.0 to 0.9.0, update the process exclude config, and remove a bind mount
@@ -1,7 +1,7 @@
+ version: '3.8'
+ services:
+ app:
+- image: ghcr.io/highmed/bpe:0.8.0
++ image: ghcr.io/highmed/bpe:0.9.0
+ restart: on-failure
+ healthcheck:
+ test: ["CMD", "java", "-cp", "dsf_bpe.jar", "org.highmed.dsf.bpe.StatusClient"]
+@@ -48,9 +48,9 @@ services:
+ ORG_HIGHMED_DSF_BPE_FHIR_SERVER_ORGANIZATION_IDENTIFIER_VALUE: TODO_ORGANIZATION_IDENTIFIER
+ ORG_HIGHMED_DSF_BPE_FHIR_SERVER_BASE_URL: https://TODO_DSF_FRIR_SERVER_FQDN/fhir
+ ORG_HIGHMED_DSF_BPE_PROCESS_EXCLUDED: |
+- highmedorg_computeFeasibility/0.6.0
+- highmedorg_computeDataSharing/0.6.0
+- highmedorg_updateAllowList/0.6.0
++ highmedorg_computeFeasibility/0.7.0
++ highmedorg_computeDataSharing/0.7.0
++ highmedorg_updateAllowList/0.7.0
+ wwwnetzwerk-universitaetsmedizinde_dataTranslate/0.7.0
+ wwwnetzwerk-universitaetsmedizinde_dataReceive/0.7.0
Upgrade DSF Plugins and Process-Plugins
Process plugins in /opt/bpe/process
:
codex-process-data-transfer-0.6.0.jar
with codex-process-data-transfer-0.7.0.jar
from the latest NUM-CODEX processes releasedsf-bpe-process-ping-0.6.0.jar
with dsf-bpe-process-ping-0.7.0.jar
from the latest HiGHmed processes releaseMake sure the process plugins in /opt/bpe/process
are configured with chmod 440
and chown root:bpe
.
Process plugins in /opt/bpe/process
:
codex-process-data-transfer-0.6.0.jar
with codex-process-data-transfer-0.7.0.jar
from the latest NUM-CODEX processes releasedsf-bpe-process-data-sharing-0.6.0.jar
with dsf-bpe-process-data-sharing-0.7.0.jar
from the latest HiGHmed processes releasedsf-bpe-process-feasibility-0.6.0.jar
with dsf-bpe-process-feasibility-0.7.0.jar
from the latest HiGHmed processes releasedsf-bpe-process-feasibility-mpc-0.6.0.jar
with dsf-bpe-process-feasibility-mpc-0.7.0.jar
from the latest HiGHmed processes releasedsf-bpe-process-local-services-0.6.0.jar
with dsf-bpe-process-local-services-0.7.0.jar
from the latest HiGHmed processes releasedsf-bpe-process-ping-0.6.0.jar
with dsf-bpe-process-ping-0.7.0.jar
from the latest HiGHmed processes releasedsf-bpe-process-update-allow-list-0.6.0.jar
with dsf-bpe-process-update-allow-list-0.7.0.jar
from the latest HiGHmed processes releaseMake sure the process plugins in /opt/bpe/process
are configured with chmod 440
and chown root:bpe
.
Plugins in '/opt/bpe/plugin':
dsf-mpi-client-pdq-0.7.0.jar
, hapi-base-2.3.jar
and hapi-structures-v25-2.3.jar
with the new files from dsf-mpi-client-pdq-0.9.0.zip
, see DSF release notesdsf-openehr-client-impl-0.7.0.jar
with the new file from dsf-openehr-client-impl-0.9.0.zip
, see DSF release notesMake sure the plugins in /opt/bpe/plugin
are configured with chmod 440
and chown root:bpe
.
Upgrade the DSF BPE containers
From /opt/bpe
execute
docker-compose up -d && docker-compose logs -f
Verify your upgrade:
INFO main - BuildInfoReaderImpl.logBuildInfo(137) | Artifact: dsf-fhir-server-jetty, version: 0.9.0, [...]
INFO main - BuildInfoReaderImpl.logBuildInfo(137) | Artifact: dsf-bpe-server-jetty, version: 0.9.0, [...]
Upgrading the DSF from 0.8.0 to 0.9.0 involves modifying the docker-compose.yml files, replacing the process plugins and recreating the containers.
\\nIf you are upgrading from 0.6.0 please see the Upgrade from 0.6.0 to 0.7.0 guide first and then visit the Upgrade from 0.7.0 to 0.9.0 guide.
\\nIf you are upgrading from 0.7.0 please see the Upgrade from 0.7.0 to 0.9.0 guide.
Preparation / Backup
/opt/fhir
directory before proceeding with the upgrade.sudo cp -rp /opt/fhir /opt/fhir_backup_pre_0.9.1_upgrade
Modify the DSF FHIR docker-compose.yml file, replace 0.9.0 with 0.9.1
@@ -1,7 +1,7 @@
+ version: '3.8'
+ services:
+ proxy:
+- image: ghcr.io/highmed/fhir_proxy:0.9.0
++ image: ghcr.io/highmed/fhir_proxy:0.9.1
+ restart: on-failure
+ ports:
+ - 127.0.0.1:80:80
+@@ -27,7 +27,7 @@ services:
+ - app
+
+ app:
+- image: ghcr.io/highmed/fhir:0.9.0
++ image: ghcr.io/highmed/fhir:0.9.1
+ restart: on-failure
+ healthcheck:
+ test: ["CMD", "java", "-cp", "dsf_fhir.jar", "org.highmed.dsf.fhir.StatusClient"]
Upgrade the DSF FHIR containers
From /opt/fhir
execute
docker-compose up -d && docker-compose logs -f
Preparation / Backup
/opt/bpe
directory before proceeding with the upgrade.sudo cp -rp /opt/bpe /opt/bpe_backup_pre_0.9.1_upgrade
Modify the DSF BPE docker-compose.yml file
NUM-CODEX (non HiGHmed) instance:
Change the bpe container version from 0.9.0 to 0.9.1
@@ -1,7 +1,7 @@
+ version: '3.8'
+ services:
+ app:
+- image: ghcr.io/highmed/bpe:0.9.0
++ image: ghcr.io/highmed/bpe:0.9.1
+ restart: on-failure
+ healthcheck:
+ test: ["CMD", "java", "-cp", "dsf_bpe.jar", "org.highmed.dsf.bpe.StatusClient"]
HiGHmed instance:
Change the bpe container version from 0.9.0 to 0.9.1
@@ -1,7 +1,7 @@
+ version: '3.8'
+ services:
+ app:
+- image: ghcr.io/highmed/bpe:0.8.0
++ image: ghcr.io/highmed/bpe:0.9.0
+ restart: on-failure
+ healthcheck:
+ test: ["CMD", "java", "-cp", "dsf_bpe.jar", "org.highmed.dsf.bpe.StatusClient"]
Upgrade DSF Plugins and Process-Plugins
* Plugins in '/opt/bpe/plugin':
1. If you are using the provided PDQ MPI Client, replace dsf-mpi-client-pdq-0.7.0.jar
, hapi-base-2.3.jar
and hapi-structures-v25-2.3.jar
with the new files from dsf-mpi-client-pdq-0.9.1.zip
, see DSF release notes
1. If you are using the provided openEHR Client, replace dsf-openehr-client-impl-0.7.0.jar
with the new file from dsf-openehr-client-impl-0.9.1.zip
, see DSF release notes
Make sure the plugins in \`/opt/bpe/plugin\` are configured with \`chmod 440\` and \`chown root:bpe\`.
+
Upgrade the DSF BPE containers
From /opt/bpe
execute
docker-compose up -d && docker-compose logs -f
Verify your upgrade:
INFO main - BuildInfoReaderImpl.logBuildInfo(137) | Artifact: dsf-fhir-server-jetty, version: 0.9.1, [...]
INFO main - BuildInfoReaderImpl.logBuildInfo(137) | Artifact: dsf-bpe-server-jetty, version: 0.9.1, [...]
Upgrading the DSF from 0.9.0 to 0.9.1 involves modifying the docker-compose.yml files, replacing the process plugins and recreating the containers.
\\nIf you are upgrading from 0.8.0 please see the Upgrade from 0.8.0 to 0.9.0 guide.
"}');export{m as comp,v as data}; diff --git a/assets/upgradeFrom91.html-BpKtvCHj.js b/assets/upgradeFrom91.html-BpKtvCHj.js new file mode 100644 index 000000000..cd93fe0cb --- /dev/null +++ b/assets/upgradeFrom91.html-BpKtvCHj.js @@ -0,0 +1,26 @@ +import{_ as i}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as s,a,o as n}from"./app-BIWb5uIp.js";const r={};function l(t,e){return n(),s("div",null,e[0]||(e[0]=[a(`Upgrading the DSF from 0.9.1 to 0.9.2 involves modifying the docker-compose.yml files and recreating the containers.
As the upgrade from 0.9.0 to 0.9.1 does not require any changes except the change of the version numbers, you can directly follow the following instructions.
Preparation / Backup
/opt/fhir
directory before proceeding with the upgrade.sudo cp -rp /opt/fhir /opt/fhir_backup_pre_0.9.2_upgrade
Modify the DSF FHIR docker-compose.yml file, replace 0.9.1 (or 0.9.0) with 0.9.2
@@ -1,7 +1,7 @@
+ version: '3.8'
+ services:
+ proxy:
+- image: ghcr.io/highmed/fhir_proxy:0.9.1
++ image: ghcr.io/highmed/fhir_proxy:0.9.2
+ restart: on-failure
+ ports:
+ - 127.0.0.1:80:80
+@@ -27,7 +27,7 @@ services:
+ - app
+
+ app:
+- image: ghcr.io/highmed/fhir:0.9.1
++ image: ghcr.io/highmed/fhir:0.9.2
+ restart: on-failure
+ healthcheck:
+ test: ["CMD", "java", "-cp", "dsf_fhir.jar", "org.highmed.dsf.fhir.StatusClient"]
Upgrade the DSF FHIR containers
From /opt/fhir
execute
docker-compose up -d && docker-compose logs -f
Preparation / Backup
/opt/bpe
directory before proceeding with the upgrade.sudo cp -rp /opt/bpe /opt/bpe_backup_pre_0.9.2_upgrade
Modify the DSF BPE docker-compose.yml file, replace 0.9.1 (or 0.9.0) with 0.9.2
@@ -1,7 +1,7 @@
+ version: '3.8'
+ services:
+ app:
+- image: ghcr.io/highmed/bpe:0.9.1
++ image: ghcr.io/highmed/bpe:0.9.2
+ restart: on-failure
+ healthcheck:
+ test: ["CMD", "java", "-cp", "dsf_bpe.jar", "org.highmed.dsf.bpe.StatusClient"]
Upgrade the DSF BPE containers
From /opt/bpe
execute
docker-compose up -d && docker-compose logs -f
Verify your upgrade:
INFO main - BuildInfoReaderImpl.logBuildInfo(137) | Artifact: dsf-fhir-server-jetty, version: 0.9.2, [...]
INFO main - BuildInfoReaderImpl.logBuildInfo(137) | Artifact: dsf-bpe-server-jetty, version: 0.9.2, [...]
Upgrading the DSF from 0.9.1 to 0.9.2 involves modifying the docker-compose.yml files and recreating the containers.
\\nAs the upgrade from 0.9.0 to 0.9.1 does not require any changes except the change of the version numbers, you can directly follow the following instructions.
"}');export{o as comp,h as data}; diff --git a/assets/upgradeFrom92.html-CUfALwuj.js b/assets/upgradeFrom92.html-CUfALwuj.js new file mode 100644 index 000000000..0288736a8 --- /dev/null +++ b/assets/upgradeFrom92.html-CUfALwuj.js @@ -0,0 +1,26 @@ +import{_ as i}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as s,a,o as n}from"./app-BIWb5uIp.js";const r={};function l(t,e){return n(),s("div",null,e[0]||(e[0]=[a(`Upgrading the DSF from 0.9.2 to 0.9.3 involves modifying the docker-compose.yml files and recreating the containers.
As the upgrade from 0.9.0 to 0.9.1 and 0.9.1 to 0.9.2 does not require any changes except the change of the version numbers, you can directly follow the following instructions.
Preparation / Backup
/opt/fhir
directory before proceeding with the upgrade.sudo cp -rp /opt/fhir /opt/fhir_backup_pre_0.9.3_upgrade
Modify the DSF FHIR docker-compose.yml file, replace 0.9.2 (or 0.9.0, 0.9.1) with 0.9.3
@@ -1,7 +1,7 @@
+ version: '3.8'
+ services:
+ proxy:
+- image: ghcr.io/highmed/fhir_proxy:0.9.2
++ image: ghcr.io/highmed/fhir_proxy:0.9.3
+ restart: on-failure
+ ports:
+ - 127.0.0.1:80:80
+@@ -27,7 +27,7 @@ services:
+ - app
+
+ app:
+- image: ghcr.io/highmed/fhir:0.9.2
++ image: ghcr.io/highmed/fhir:0.9.3
+ restart: on-failure
+ healthcheck:
+ test: ["CMD", "java", "-cp", "dsf_fhir.jar", "org.highmed.dsf.fhir.StatusClient"]
Upgrade the DSF FHIR containers
From /opt/fhir
execute
docker-compose up -d && docker-compose logs -f
Preparation / Backup
/opt/bpe
directory before proceeding with the upgrade.sudo cp -rp /opt/bpe /opt/bpe_backup_pre_0.9.3_upgrade
Modify the DSF BPE docker-compose.yml file, replace 0.9.2 (or 0.9.0, 0.9.1) with 0.9.3
@@ -1,7 +1,7 @@
+ version: '3.8'
+ services:
+ app:
+- image: ghcr.io/highmed/bpe:0.9.2
++ image: ghcr.io/highmed/bpe:0.9.3
+ restart: on-failure
+ healthcheck:
+ test: ["CMD", "java", "-cp", "dsf_bpe.jar", "org.highmed.dsf.bpe.StatusClient"]
Upgrade the DSF BPE containers
From /opt/bpe
execute
docker-compose up -d && docker-compose logs -f
Verify your upgrade:
INFO main - BuildInfoReaderImpl.logBuildInfo(137) | Artifact: dsf-fhir-server-jetty, version: 0.9.3, [...]
INFO main - BuildInfoReaderImpl.logBuildInfo(137) | Artifact: dsf-bpe-server-jetty, version: 0.9.3, [...]
Upgrading the DSF from 0.9.2 to 0.9.3 involves modifying the docker-compose.yml files and recreating the containers.
\\nAs the upgrade from 0.9.0 to 0.9.1 and 0.9.1 to 0.9.2 does not require any changes except the change of the version numbers, you can directly follow the following instructions.
"}');export{o as comp,h as data}; diff --git a/assets/usingTheGitHubMaven.html-Hb3vEwIO.js b/assets/usingTheGitHubMaven.html-Hb3vEwIO.js new file mode 100644 index 000000000..0477498ef --- /dev/null +++ b/assets/usingTheGitHubMaven.html-Hb3vEwIO.js @@ -0,0 +1,38 @@ +import{_ as i}from"./plugin-vue_export-helper-DlAUqK2U.js";import{c as a,a as n,o as t}from"./app-BIWb5uIp.js";const e={};function l(h,s){return t(),a("div",null,s[0]||(s[0]=[n(`For more information take a look at this GitHub documentation about authentication.
In order to install the HiGHmed DSF packages using Maven in your own projects you need a personal GitHub access token. This GitHub documentation shows you how to generate one.
After that, add the following configuration to your local .m2/settings.xml. Replace USERNAME
with your GitHub username and TOKEN
with the previously generated personal GitHub access token. The token needs at least the scope read:packages
.
<settings xmlns="http://maven.apache.org/SETTINGS/1.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.0.0
+ http://maven.apache.org/xsd/settings-1.0.0.xsd">
+
+ <activeProfiles>
+ <activeProfile>github</activeProfile>
+ </activeProfiles>
+
+ <profiles>
+ <profile>
+ <id>github</id>
+ <repositories>
+ <repository>
+ <id>github</id>
+ <name>GitHub HiGHmed Apache Maven Packages</name>
+ <url>https://maven.pkg.github.com/highmed/highmed-dsf</url>
+ <releases><enabled>true</enabled></releases>
+ <snapshots><enabled>true</enabled></snapshots>
+ </repository>
+ </repositories>
+ </profile>
+ </profiles>
+
+ <servers>
+ <server>
+ <id>github</id>
+ <username>USERNAME</username>
+ <password>TOKEN</password>
+ </server>
+ </servers>
+</settings>
For more information take a look at this GitHub documentation about package installation.
To install an Apache Maven package from GitHub Packages edit the element dependencies
in the pom.xml file by including the package. This could look as follows to include the dsf-bpe-process-base
package (replace VERSION
with the package version, e.g. 0.4.0-SNAPSHOT
):
<dependencies>
+ <dependency>
+ <groupId>org.highmed.dsf</groupId>
+ <artifactId>dsf-bpe-process-base</artifactId>
+ <version>VERSION</version>
+ </dependency>
+<dependencies>
For more information take a look at this GitHub documentation about authentication.
"}');export{r as comp,d as data}; diff --git a/download/1.7.0/client_cert_ca_chains.pem b/download/1.7.0/client_cert_ca_chains.pem new file mode 100644 index 000000000..13eec0932 --- /dev/null +++ b/download/1.7.0/client_cert_ca_chains.pem @@ -0,0 +1,913 @@ +Subject: C=DE,O=D-Trust GmbH,CN=D-TRUST Limited Basic Root CA 1 2019 +-----BEGIN CERTIFICATE----- +MIIETjCCAzagAwIBAgIDD+UpMA0GCSqGSIb3DQEBCwUAMFMxCzAJBgNVBAYTAkRF +MRUwEwYDVQQKDAxELVRydXN0IEdtYkgxLTArBgNVBAMMJEQtVFJVU1QgTGltaXRl +ZCBCYXNpYyBSb290IENBIDEgMjAxOTAeFw0xOTA2MTkwODE1NTFaFw0zNDA2MTkw +ODE1NTFaMFMxCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxELVRydXN0IEdtYkgxLTAr +BgNVBAMMJEQtVFJVU1QgTGltaXRlZCBCYXNpYyBSb290IENBIDEgMjAxOTCCASIw +DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAN81X083S74JbInkPxAL5tQg5SOF +ttjX/rviq7s4HG4zBvUF4KgwqXysC+mA5nRwEkFXnI6ZQTB8M0DI6vSBnpAOghZN +QgXFu07WsQWOTTlywst138t1t6YU8QPUVb1UbxiVu4WtycFaq98Rbfcsu6YIoENB +gjeXZRJPzxPhIf1oLtSkgBihX/7eVxZdVRGzAtuMZP9TqI3bQpZ1yY/7Od54ra4Q +SPy2La4VOqUSAyS2yRreugLL8aqSt7dh+YsDSgtHEn2HgrunptIFN45kVM4PEOHx +/gi5lpZ+pKRmLuoXMmvBUwa/HlySSV7bVv6xfHFZjjs3YjXvZh+8StvEY9cCAwEA +AaOCASkwggElMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFONo4hcITMmOHGJE +DKkpkQJiC6OTMA4GA1UdDwEB/wQEAwIBBjCB4gYDVR0fBIHaMIHXMIGJoIGGoIGD +hoGAbGRhcDovL2RpcmVjdG9yeS5kLXRydXN0Lm5ldC9DTj1ELVRSVVNUJTIwTGlt +aXRlZCUyMEJhc2ljJTIwUm9vdCUyMENBJTIwMSUyMDIwMTksTz1ELVRydXN0JTIw +R21iSCxDPURFP2NlcnRpZmljYXRlcmV2b2NhdGlvbmxpc3QwSaBHoEWGQ2h0dHA6 +Ly9jcmwuZC10cnVzdC5uZXQvY3JsL2QtdHJ1c3RfbGltaXRlZF9iYXNpY19yb290 +X2NhXzFfMjAxOS5jcmwwDQYJKoZIhvcNAQELBQADggEBALzcGA/9SQuKkkdFSUT+ +8mU4RTV7PWB3DpmqNt9kcB00oYqohwCyRXJygjcN/lNHi4828us0H7DtGdl2CQp4 +WbTcWtdBbjwaU0XH/FXdtxgo9BzM/VVfFUZUai8CtlDn6fJjLhVmPWQtX1EByEe/ +ulEwyxHipVD5pI1dY+ctdqXtWZ+HsudvZC5a/CFS/hElq2yTlS2SuKeTovGGM8GB +Y+XI16N3w/ItEjnQJJNPxPRfNjQdvhicaujXEOErHP8UGWgCJ+aDGboSq2dVbczE +m4DnKPXpWydVWZLI9d6a1RUWwmB9GD1/JKvxPThbwkHnWixlLkSKKr7uMaiWGaCX +oVQ= +-----END CERTIFICATE----- +Subject: C=DE,O=D-Trust GmbH,CN=D-TRUST Limited Basic CA 1-2 2019 +-----BEGIN 
CERTIFICATE----- +MIIFuTCCBKGgAwIBAgIDD+VKMA0GCSqGSIb3DQEBCwUAMFMxCzAJBgNVBAYTAkRF +MRUwEwYDVQQKDAxELVRydXN0IEdtYkgxLTArBgNVBAMMJEQtVFJVU1QgTGltaXRl +ZCBCYXNpYyBSb290IENBIDEgMjAxOTAeFw0xOTA4MjAxMjMyMjJaFw0zNDA2MTkw +ODE1NTFaMFAxCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxELVRydXN0IEdtYkgxKjAo +BgNVBAMMIUQtVFJVU1QgTGltaXRlZCBCYXNpYyBDQSAxLTIgMjAxOTCCASIwDQYJ +KoZIhvcNAQEBBQADggEPADCCAQoCggEBANxlUGXW81Y2JG/BtEO5dlbELYat4Zx9 +5b4RUux5scPTZX3wrEW+PK4EwQCvV8FH0SoDatOJcFiGduX2r29c0aFFyVKu6xHF +DApYNYV99+z5TiqXFdVkOUti56r10KsaO3FkcgAt4wDFgYd0dDseYo2SQqpKeqFR +QMVQVdLCt66yU8qbiaZ/sL2pcNsJMD/DkEV/axpTwzzk6H+kGUIJ+jpKpYw2pMFF +wYlqW91ICfLtTHvJqFb3DZ7yFNSiXgYBYH9R142vjflh1vg+GuqORiTLi/AhIjlb +3XUAFIZzJ77+PLQprYlRHGGBMaJ+3VbI+hWPTHpwVt6wHNVcfHUnA3kCAwEAAaOC +ApcwggKTMB8GA1UdIwQYMBaAFONo4hcITMmOHGJEDKkpkQJiC6OTMIIBMgYIKwYB +BQUHAQEEggEkMIIBIDBABggrBgEFBQcwAYY0aHR0cDovL2xpbWl0ZWQtYmFzaWMt +cm9vdC1jYS0xLTIwMTkub2NzcC5kLXRydXN0Lm5ldDBTBggrBgEFBQcwAoZHaHR0 +cDovL3d3dy5kLXRydXN0Lm5ldC9jZ2ktYmluL0QtVFJVU1RfTGltaXRlZF9CYXNp +Y19Sb290X0NBXzFfMjAxOS5jcnQwgYYGCCsGAQUFBzAChnpsZGFwOi8vZGlyZWN0 +b3J5LmQtdHJ1c3QubmV0L0NOPUQtVFJVU1QlMjBMaW1pdGVkJTIwQmFzaWMlMjBS +b290JTIwQ0ElMjAxJTIwMjAxOSxPPUQtVHJ1c3QlMjBHbWJILEM9REU/Y0FDZXJ0 +aWZpY2F0ZT9iYXNlPzAYBgNVHSAEETAPMA0GCysGAQQBpTQCg3QBMIHcBgNVHR8E +gdQwgdEwgc6ggcuggciGgYBsZGFwOi8vZGlyZWN0b3J5LmQtdHJ1c3QubmV0L0NO +PUQtVFJVU1QlMjBMaW1pdGVkJTIwQmFzaWMlMjBSb290JTIwQ0ElMjAxJTIwMjAx +OSxPPUQtVHJ1c3QlMjBHbWJILEM9REU/Y2VydGlmaWNhdGVyZXZvY2F0aW9ubGlz +dIZDaHR0cDovL2NybC5kLXRydXN0Lm5ldC9jcmwvZC10cnVzdF9saW1pdGVkX2Jh +c2ljX3Jvb3RfY2FfMV8yMDE5LmNybDAdBgNVHQ4EFgQU0A0+3Aiv40EIZuDc8vqZ +ai3fGLkwDgYDVR0PAQH/BAQDAgEGMBIGA1UdEwEB/wQIMAYBAf8CAQAwDQYJKoZI +hvcNAQELBQADggEBAH8NXqPrIcKiZC51vfxvajB1HhFnRFFN/G3ZU4yR7XI+uGec +DjR8tOHdFYFmZG4qbDl70ZuRG4bs6H8cvfWyo1NmWZqjAkr6o1kIRTnFwn4JsssJ +7HR2RmJ4ar0C9miIk9sTNLwKy1/kBvCFqssdKdQwBSi85KRxPFYvv+vnMCvSL0Ob ++65q6V7QzvCk7ojiSrcfvHS8QnHJE9ReFRKD4KXAd7+OcZc1K3Mf+uNNHt3CP3ie +DN9K90sI81IWucEeN2NYvw/tJNDH5L4Ah3cn8XzxQVzOfAnn1isf2pci1IEj5f3Y 
+9JA7LYLLeH7n4+E5JWRiIUAhqNhQTchmwKLdR+E= +-----END CERTIFICATE----- +Subject: C=DE,O=D-Trust GmbH,CN=D-TRUST Limited Basic CA 1-3 2019 +-----BEGIN CERTIFICATE----- +MIIFvzCCBKegAwIBAgIDD+UsMA0GCSqGSIb3DQEBCwUAMFMxCzAJBgNVBAYTAkRF +MRUwEwYDVQQKDAxELVRydXN0IEdtYkgxLTArBgNVBAMMJEQtVFJVU1QgTGltaXRl +ZCBCYXNpYyBSb290IENBIDEgMjAxOTAeFw0xOTA2MjAxMTQ2NDlaFw0zNDA2MTkw +ODE1NTFaMFAxCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxELVRydXN0IEdtYkgxKjAo +BgNVBAMMIUQtVFJVU1QgTGltaXRlZCBCYXNpYyBDQSAxLTMgMjAxOTCCASIwDQYJ +KoZIhvcNAQEBBQADggEPADCCAQoCggEBAL7RBVVVynr23OCuKpkrzJK87yspTQVh +vJRTtHS7zXG0jzpNLUV8D6Qjd3nOVs2VnW+rpfgQmFeic3E8G5jJmGv18O6LOGJS +CXbiF2eonBQ0oTF97oVukj/8SRYWEBjeBjB//U3gwdt14qATw1zk3B8K/u0zxtQ0 +1xMTL5ckilQ7/+x+RVCGanvx/FscQ1He0fkhfLgqaDJimWu7b0phwZqyyI1GEjEN +9FLWJxh4OeIzupAT4b1j5oaXHCY4BGT5zk6PbOYC7U2Jnt4zKU+pJVIJ+EhYOJS5 +tQM7YFG/eECAHcBtgdUIDgBZqrrx+k14aSQcW701sGqQT7cKcLen+yMCAwEAAaOC +Ap0wggKZMB8GA1UdIwQYMBaAFONo4hcITMmOHGJEDKkpkQJiC6OTMIIBMgYIKwYB +BQUHAQEEggEkMIIBIDBABggrBgEFBQcwAYY0aHR0cDovL2xpbWl0ZWQtYmFzaWMt +cm9vdC1jYS0xLTIwMTkub2NzcC5kLXRydXN0Lm5ldDBTBggrBgEFBQcwAoZHaHR0 +cDovL3d3dy5kLXRydXN0Lm5ldC9jZ2ktYmluL0QtVFJVU1RfTGltaXRlZF9CYXNp +Y19Sb290X0NBXzFfMjAxOS5jcnQwgYYGCCsGAQUFBzAChnpsZGFwOi8vZGlyZWN0 +b3J5LmQtdHJ1c3QubmV0L0NOPUQtVFJVU1QlMjBMaW1pdGVkJTIwQmFzaWMlMjBS +b290JTIwQ0ElMjAxJTIwMjAxOSxPPUQtVHJ1c3QlMjBHbWJILEM9REU/Y0FDZXJ0 +aWZpY2F0ZT9iYXNlPzAYBgNVHSAEETAPMA0GCysGAQQBpTQCg3QBMIHiBgNVHR8E +gdowgdcwgYmggYaggYOGgYBsZGFwOi8vZGlyZWN0b3J5LmQtdHJ1c3QubmV0L0NO +PUQtVFJVU1QlMjBMaW1pdGVkJTIwQmFzaWMlMjBSb290JTIwQ0ElMjAxJTIwMjAx +OSxPPUQtVHJ1c3QlMjBHbWJILEM9REU/Y2VydGlmaWNhdGVyZXZvY2F0aW9ubGlz +dDBJoEegRYZDaHR0cDovL2NybC5kLXRydXN0Lm5ldC9jcmwvZC10cnVzdF9saW1p +dGVkX2Jhc2ljX3Jvb3RfY2FfMV8yMDE5LmNybDAdBgNVHQ4EFgQUZeInnG91+3Vn +CLczeuOdjnssKIgwDgYDVR0PAQH/BAQDAgEGMBIGA1UdEwEB/wQIMAYBAf8CAQAw +DQYJKoZIhvcNAQELBQADggEBAJLvDm1ZuBMhO2qR2R4H/GDDYETRBNqvtvWHRvBw +vryhtDm7tovqDg2v7x+vcSqZApFVW+zs+OvzNRXtyyIlkqxP5CL0okpHqqmKaaHn 
+tH8D93pV/p7xE39gFE6NNceSx6DHBxuOcOEha8zA8ixH+j+fzLX6SEOAhBIfCgDb +qg9Xtxi7+uupq4koQcXrDNTRHxuoNxAHnwYtgapwKwyBwMMaLliqxVyDcwN6aJEQ +tVoyOibUnUXej4bXh8FPCSU7m98nLY9aKk1O30jsgSiLxuVKBav8JaDPY1i69CSd +vn2adNlHjbl/57GVQ4VoTDzlmhRQGR9TIRPYA5F6VdeI4ak= +-----END CERTIFICATE----- +Subject: C=DE,O=D-Trust GmbH,CN=D-TRUST Root Class 3 CA 2 2009 +-----BEGIN CERTIFICATE----- +MIIEMzCCAxugAwIBAgIDCYPzMA0GCSqGSIb3DQEBCwUAME0xCzAJBgNVBAYTAkRF +MRUwEwYDVQQKDAxELVRydXN0IEdtYkgxJzAlBgNVBAMMHkQtVFJVU1QgUm9vdCBD +bGFzcyAzIENBIDIgMjAwOTAeFw0wOTExMDUwODM1NThaFw0yOTExMDUwODM1NTha +ME0xCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxELVRydXN0IEdtYkgxJzAlBgNVBAMM +HkQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgMjAwOTCCASIwDQYJKoZIhvcNAQEB +BQADggEPADCCAQoCggEBANOySs96R+91myP6Oi/WUEWJNTrGa9v+2wBoqOADER03 +UAifTUpolDWzU9GUY6cgVq/eUXjsKj3zSEhQPgrfRlWLJ23DEE0NkVJD2IfgXU42 +tSHKXzlABF9bfsyjxiupQB7ZNoTWSPOSHjRGICTBpFGOShrvUD9pXRl/RcPHAY9R +ySPocq60vFYJfxLLHLGvKZAKyVXMD9O0Gu1HNVpK7ZxzBCHQqr0ME7UAyiZsxGsM +lFqVlNpQmvH/pStmMaTJOKDfHR+4CS7zp+hnUquVH+BGPtikw8paxTGA6Eian5Rp +/hnd2HN8gcqW3o7tszIFZYQ05ub9VxC1X3a/L7AQDcUCAwEAAaOCARowggEWMA8G +A1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFP3aFMSfMN4hvR5COfyrYyNJ4PGEMA4G +A1UdDwEB/wQEAwIBBjCB0wYDVR0fBIHLMIHIMIGAoH6gfIZ6bGRhcDovL2RpcmVj +dG9yeS5kLXRydXN0Lm5ldC9DTj1ELVRSVVNUJTIwUm9vdCUyMENsYXNzJTIwMyUy +MENBJTIwMiUyMDIwMDksTz1ELVRydXN0JTIwR21iSCxDPURFP2NlcnRpZmljYXRl +cmV2b2NhdGlvbmxpc3QwQ6BBoD+GPWh0dHA6Ly93d3cuZC10cnVzdC5uZXQvY3Js +L2QtdHJ1c3Rfcm9vdF9jbGFzc18zX2NhXzJfMjAwOS5jcmwwDQYJKoZIhvcNAQEL +BQADggEBAH+X2zDI36ScfSF6gHDOFBJpiBSVYEQBrLLpME+bUMJm2H6NMLVwMeni +acfzcNsgFYbQDfC+rAF1hM5+n02/t2A7nPPKHeJeaNijnZflQGDSNiH+0LS4F9p0 +o3/U37CYAqxva2ssJSRyoWXuJVrl5jLn8t+rSfrzkGkj2wTZ51xY/GXUl77M/C4K +zCUqNQT4YJEVdT1B/yMfGchs64JTBKbkTCJNjYy6zltz7GRUUG3RnFX7acM2w4y8 +PIWmawomDeCTmGCufsYkl4phX5GOZpIJhzbNi5stPvZR1FDUWSi9g/LMKHtThm3Y +Johw1+qRzT65ysCQblrGXnRl11z+o+I= +-----END CERTIFICATE----- +Subject: C=DE,O=D-Trust GmbH,CN=D-TRUST SSL Class 3 CA 1 2009 +-----BEGIN CERTIFICATE----- 
+MIIFMjCCBBqgAwIBAgIDCZBjMA0GCSqGSIb3DQEBCwUAME0xCzAJBgNVBAYTAkRF +MRUwEwYDVQQKDAxELVRydXN0IEdtYkgxJzAlBgNVBAMMHkQtVFJVU1QgUm9vdCBD +bGFzcyAzIENBIDIgMjAwOTAeFw0wOTExMTIxMjQ2NTVaFw0yOTExMDUwODM1NTha +MEwxCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxELVRydXN0IEdtYkgxJjAkBgNVBAMM +HUQtVFJVU1QgU1NMIENsYXNzIDMgQ0EgMSAyMDA5MIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEAoal0SyLSijE0JkuhHJmOCbmQznyxuSY7DaEwhUsdUpI+ +2llkDLz6s9BWQe1zCVXDhrt3qz5U5H4h6jxm5Ec+ZbFiU3Gv2yxpI5cRPrqj9mJU +1CGgy1+29khuUnoopzSq66HPuGZGh06I7bJkXTQ7AQ92z1MdL2wATj1UWdNid3sQ +NiWIm+69nURHY6tmCNenNcG6aV4qjHMUPsjpCRabNY9nUO12rsmiDW2mbAC3gcxQ +lqLgLYur9HvB8cW0xu2JZ/B3PXmNphVuWskp3Y1u0SvIYzuEsE7lWDbBmtWZtabB +hzThkDQvd+3keQ1sU/beq1NeXfgKzQ5G+4Ql2PUY/wIDAQABo4ICGjCCAhYwHwYD +VR0jBBgwFoAU/doUxJ8w3iG9HkI5/KtjI0ng8YQwRAYIKwYBBQUHAQEEODA2MDQG +CCsGAQUFBzABhihodHRwOi8vcm9vdC1jMy1jYTItMjAwOS5vY3NwLmQtdHJ1c3Qu +bmV0MF8GA1UdIARYMFYwVAYEVR0gADBMMEoGCCsGAQUFBwIBFj5odHRwOi8vd3d3 +LmQtdHJ1c3QubmV0L2ludGVybmV0L2ZpbGVzL0QtVFJVU1RfUm9vdF9QS0lfQ1BT +LnBkZjAzBgNVHREELDAqgRBpbmZvQGQtdHJ1c3QubmV0hhZodHRwOi8vd3d3LmQt +dHJ1c3QubmV0MIHTBgNVHR8EgcswgcgwgYCgfqB8hnpsZGFwOi8vZGlyZWN0b3J5 +LmQtdHJ1c3QubmV0L0NOPUQtVFJVU1QlMjBSb290JTIwQ2xhc3MlMjAzJTIwQ0El +MjAyJTIwMjAwOSxPPUQtVHJ1c3QlMjBHbWJILEM9REU/Y2VydGlmaWNhdGVyZXZv +Y2F0aW9ubGlzdDBDoEGgP4Y9aHR0cDovL3d3dy5kLXRydXN0Lm5ldC9jcmwvZC10 +cnVzdF9yb290X2NsYXNzXzNfY2FfMl8yMDA5LmNybDAdBgNVHQ4EFgQUUBkylJrE +tQRNVtDAgyHVNVWwsXowDgYDVR0PAQH/BAQDAgEGMBIGA1UdEwEB/wQIMAYBAf8C +AQAwDQYJKoZIhvcNAQELBQADggEBABM5QRHX/yInsmZLWVlvmWmKb3c4IB3hAIVR +sAGhkvQJ/RD1GZjZUBBYMWkD1P37fTQxlqTOe3NecVvElkYZuCq7HSM6o7awzb3m +yLn1kN+hDCsxX0EYbVSNjEjkW3QEkqJH9owH4qeMDxf7tfXB7BVKO+rarYPa2PR8 +Wz2KhjFDmAeFg2J89YcpeJJEEJXoweAkgJEEwwEIfJ2yLjYo78RD0Rvij/+zkfj9 ++dSvTiZTuqicyo37qNoYHgchuqXnKodhWkW89oo2NKhfeNHHbqvXEJmx0PbI6YyQ +50GnYECZRHNKhgbPEtNy/QetU53aWlTlvu4NIwLW5XVsrxlQ2Zw= +-----END CERTIFICATE----- +Subject: C=GR,O=Hellenic Academic and Research Institutions CA,CN=HARICA Client ECC Root CA 2021 +-----BEGIN CERTIFICATE----- 
+MIICWjCCAeGgAwIBAgIQMWjZ2OFiVx7SGUSI5hB98DAKBggqhkjOPQQDAzBvMQsw +CQYDVQQGEwJHUjE3MDUGA1UECgwuSGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJlc2Vh +cmNoIEluc3RpdHV0aW9ucyBDQTEnMCUGA1UEAwweSEFSSUNBIENsaWVudCBFQ0Mg +Um9vdCBDQSAyMDIxMB4XDTIxMDIxOTExMDMzNFoXDTQ1MDIxMzExMDMzM1owbzEL +MAkGA1UEBhMCR1IxNzA1BgNVBAoMLkhlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNl +YXJjaCBJbnN0aXR1dGlvbnMgQ0ExJzAlBgNVBAMMHkhBUklDQSBDbGllbnQgRUND +IFJvb3QgQ0EgMjAyMTB2MBAGByqGSM49AgEGBSuBBAAiA2IABAcYrZWWlNBcD4L3 +KkD6AsnJPTamowRqwW2VAYhgElRsXKIrbhM6iJUMHCaGNkqJGbcY3jvoqFAfyt9b +v0mAFdvjMOEdWscqigEH/m0sNO8oKJe8wflXhpWLNc+eWtFolaNCMEAwDwYDVR0T +AQH/BAUwAwEB/zAdBgNVHQ4EFgQUUgjSvjKBJf31GpfsTl8au1PNkK0wDgYDVR0P +AQH/BAQDAgGGMAoGCCqGSM49BAMDA2cAMGQCMEwxRUZPqOa+w3eyGhhLLYh7WOar +lGtEA7AX/9+Cc0RRLP2THQZ7FNKJ7EAM7yEBLgIwL8kuWmwsHdmV4J6wuVxSfPb4 +OMou8dQd8qJJopX4wVheT/5zCu8xsKsjWBOMi947 +-----END CERTIFICATE----- +Subject: C=GR,O=Hellenic Academic and Research Institutions CA,CN=GEANT S/MIME ECC 1 +-----BEGIN CERTIFICATE----- +MIIDdjCCAvugAwIBAgIQDJz22V5XPKzFUHJkSyvftjAKBggqhkjOPQQDAzBvMQsw +CQYDVQQGEwJHUjE3MDUGA1UECgwuSGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJlc2Vh +cmNoIEluc3RpdHV0aW9ucyBDQTEnMCUGA1UEAwweSEFSSUNBIENsaWVudCBFQ0Mg +Um9vdCBDQSAyMDIxMB4XDTI1MDEwMzExMTE0MFoXDTM5MTIzMTExMTEzOVowYzEL +MAkGA1UEBhMCR1IxNzA1BgNVBAoMLkhlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNl +YXJjaCBJbnN0aXR1dGlvbnMgQ0ExGzAZBgNVBAMMEkdFQU5UIFMvTUlNRSBFQ0Mg +MTB2MBAGByqGSM49AgEGBSuBBAAiA2IABHHzZBx8BXADofSXSxbumNUgOW7oPUFD +MTBf97xIFnUvxGf449zwH17IBxs6EnsIRSZ80+3fMPGm7PcW4bogQmpStkHsbhl/ +yc+R0YudJcnXnC+HuGwkKlk9hoXX+gtBJ6OCAWYwggFiMBIGA1UdEwEB/wQIMAYB +Af8CAQAwHwYDVR0jBBgwFoAUUgjSvjKBJf31GpfsTl8au1PNkK0wUAYIKwYBBQUH +AQEERDBCMEAGCCsGAQUFBzAChjRodHRwOi8vY3J0LmhhcmljYS5nci9IQVJJQ0Et +Q2xpZW50LVJvb3QtMjAyMS1FQ0MuY2VyMEQGA1UdIAQ9MDswOQYEVR0gADAxMC8G +CCsGAQUFBwIBFiNodHRwOi8vcmVwby5oYXJpY2EuZ3IvZG9jdW1lbnRzL0NQUzAd +BgNVHSUEFjAUBggrBgEFBQcDAgYIKwYBBQUHAwQwRQYDVR0fBD4wPDA6oDigNoY0 +aHR0cDovL2NybC5oYXJpY2EuZ3IvSEFSSUNBLUNsaWVudC1Sb290LTIwMjEtRUND 
+LmNybDAdBgNVHQ4EFgQUccTTotYuiaWImnarwbliZz8kZeswDgYDVR0PAQH/BAQD +AgGGMAoGCCqGSM49BAMDA2kAMGYCMQDbRE4Sf4j5cdd9PlC4xjnNvJxfsDX5ouYb +3ffJ6ukmEjI7RnHm6xJ2V++40nWYVfgCMQDfXaawZpZymK4CBIMHxoViSYBHw/Mm +JOG3trrP+Q4Kb0AfJb/S2ojAD+EAKiiB5hM= +-----END CERTIFICATE----- +Subject: C=GR,O=Hellenic Academic and Research Institutions CA,CN=HARICA S/MIME ECC +-----BEGIN CERTIFICATE----- +MIIDejCCAwGgAwIBAgIQR+QjpXN0lyzfL5Q4f0wP1DAKBggqhkjOPQQDAzBvMQsw +CQYDVQQGEwJHUjE3MDUGA1UECgwuSGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJlc2Vh +cmNoIEluc3RpdHV0aW9ucyBDQTEnMCUGA1UEAwweSEFSSUNBIENsaWVudCBFQ0Mg +Um9vdCBDQSAyMDIxMB4XDTIxMDMxOTA5MzY1OFoXDTM2MDMxNTA5MzY1N1owYjEL +MAkGA1UEBhMCR1IxNzA1BgNVBAoMLkhlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNl +YXJjaCBJbnN0aXR1dGlvbnMgQ0ExGjAYBgNVBAMMEUhBUklDQSBTL01JTUUgRUND +MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEbidoUrQSTETj3yKvZfP8hz4cjWJYLu2S +BQVAorbEw3a9bbiaYGSreAJJamuTKI/8EiGjOZ3gAxlWZirpKofebc+lqDa87zjh +E88mIhT+WYBu4S/0+lb0r59quKF8Y0pHo4IBbTCCAWkwEgYDVR0TAQH/BAgwBgEB +/wIBADAfBgNVHSMEGDAWgBRSCNK+MoEl/fUal+xOXxq7U82QrTBXBggrBgEFBQcB +AQRLMEkwRwYIKwYBBQUHMAKGO2h0dHA6Ly9yZXBvLmhhcmljYS5nci9jZXJ0cy9I +QVJJQ0EtQ2xpZW50LVJvb3QtMjAyMS1FQ0MuY2VyMEQGA1UdIAQ9MDswOQYEVR0g +ADAxMC8GCCsGAQUFBwIBFiNodHRwOi8vcmVwby5oYXJpY2EuZ3IvZG9jdW1lbnRz +L0NQUzAdBgNVHSUEFjAUBggrBgEFBQcDAgYIKwYBBQUHAwQwRQYDVR0fBD4wPDA6 +oDigNoY0aHR0cDovL2NybC5oYXJpY2EuZ3IvSEFSSUNBLUNsaWVudC1Sb290LTIw +MjEtRUNDLmNybDAdBgNVHQ4EFgQUTq8v2Rh752P2xakBhmVdLF4791YwDgYDVR0P +AQH/BAQDAgGGMAoGCCqGSM49BAMDA2cAMGQCMEayoDfZOkZgvT13XbjuSpKc2m/C +cEQqYDwGIyXBhZqyMoMyDcthQsiEwBW3lHT5IQIwIT2kzroVW2iWhRF3vaTEAo3m ++AosNW84YGle0MMG2SDIoQEJiqhjRfQwZHHeBzCE +-----END CERTIFICATE----- +Subject: C=GR,O=Hellenic Academic and Research Institutions CA,CN=HARICA Client RSA Root CA 2021 +-----BEGIN CERTIFICATE----- +MIIFqjCCA5KgAwIBAgIQVVL4HtsbJCyeu5YYzQIoPjANBgkqhkiG9w0BAQsFADBv +MQswCQYDVQQGEwJHUjE3MDUGA1UECgwuSGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJl +c2VhcmNoIEluc3RpdHV0aW9ucyBDQTEnMCUGA1UEAwweSEFSSUNBIENsaWVudCBS 
+U0EgUm9vdCBDQSAyMDIxMB4XDTIxMDIxOTEwNTg0NloXDTQ1MDIxMzEwNTg0NVow +bzELMAkGA1UEBhMCR1IxNzA1BgNVBAoMLkhlbGxlbmljIEFjYWRlbWljIGFuZCBS +ZXNlYXJjaCBJbnN0aXR1dGlvbnMgQ0ExJzAlBgNVBAMMHkhBUklDQSBDbGllbnQg +UlNBIFJvb3QgQ0EgMjAyMTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIB +AIHbV0KQLHQ19Pi4dBlNqwlad0WBc2KwNZ/40LczAIcTtparDlQSMAe8m7dI19EZ +g66O2KnxqQCEsIxenugMj1Rpv/bUCE8mcP4YQWMaszKLQPgHq1cx8MYWdmeatN0v +8tFrxdCShJFxbg8uY+kfU6TdUhPMCYMpgQzFU3VEsQ5nUxjQwx+IS5+UJLQpvLvo +Tv1v0hUdSdyNcPIRGiBRVRG6iG/E91B51qox4oQ9XjLIdypQceULL+m26u+rCjM5 +Dv2PpWdDgo6YaQkJG0DNOGdH6snsl3ES3iT1cjzR90NMJveQsonpRUtVPTEFekHi +lbpDwBfFtoU9GY1kcPNbrM2f0yl1h0uVZ2qm+NHdvJCGiUMpqTdb9V2wJlpTQnaQ +K8+eVmwrVM9cmmXfW4tIYDh8+8ULz3YEYwIzKn31g2fn+sZD/SsP1CYvd6QywSTq +ZJ2/szhxMUTyR7iiZkGh+5t7vMdGanW/WqKM6GpEwbiWtcAyCC17dDVzssrG/q8R +chj258jCz6Uq6nvWWeh8oLJqQAlpDqWW29EAufGIbjbwiLKd8VLyw3y/MIk8Cmn5 +IqRl4ZvgdMaxhZeWLK6Uj1CmORIfvkfygXjTdTaefVogl+JSrpmfxnybZvP+2M/u +vZcGHS2F3D42U5Z7ILroyOGtlmI+EXyzAISep0xxq0o3AgMBAAGjQjBAMA8GA1Ud +EwEB/wQFMAMBAf8wHQYDVR0OBBYEFKDWBz1eJPd7oEQuJFINGaorBJGnMA4GA1Ud +DwEB/wQEAwIBhjANBgkqhkiG9w0BAQsFAAOCAgEADUf5CWYxUux57sKo8mg+7ZZF +yzqmmGM/6itNTgPQHILhy9Pl1qtbZyi8nf4MmQqAVafOGyNhDbBX8P7gyr7mkNuD +LL6DjvR5tv7QDUKnWB9p6oH1BaX+RmjrbHjJ4Orn5t4xxdLVLIJjKJ1dqBp+iObn +K/Es1dAFntwtvTdm1ASip62/OsKoO63/jZ0z4LmahKGHH3b0gnTXDvkwSD5biD6q +XGvWLwzojnPCGJGDObZmWtAfYCddTeP2Og1mUJx4e6vzExCuDy+r6GSzGCCdRjVk +JXPqmxBcWDWJsUZIp/Ss1B2eW8yppRoTTyRQqtkbbbFA+53dWHTEwm8UcuzbNZ+4 +VHVFw6bIGig1Oq5l8qmYzq9byTiMMTt/zNyW/eJb1tBZ9Ha6C8tPgxDHQNAdYOkq +5UhYdwxFab4ZcQQk4uMkH0rIwT6Z9ZaYOEgloRWwG9fihBhb9nE1mmh7QMwYXAwk +ndSV9ZmqRuqurL/0FBkk6Izs4/W8BmiKKgwFXwqXdafcfsD913oY3zDROEsfsJhw +v8x8c/BuxDGlpJcdrL/ObCFKvicjZ/MGVoEKkY624QMFMyzaNAhNTlAjrR+lxdR6 +/uoJ7KcoYItGfLXqm91P+edrFcaIz0Pb5SfcBFZub0YV8VYt6FwMc8MjgTggy8kM +ac8sqzuEYDMZUv1pFDM= +-----END CERTIFICATE----- +Subject: C=GR,O=Hellenic Academic and Research Institutions CA,CN=GEANT S/MIME RSA 1 +-----BEGIN CERTIFICATE----- 
+MIIGRDCCBCygAwIBAgIQFfmubKqNLtTTb3h/Htx7ATANBgkqhkiG9w0BAQsFADBv +MQswCQYDVQQGEwJHUjE3MDUGA1UECgwuSGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJl +c2VhcmNoIEluc3RpdHV0aW9ucyBDQTEnMCUGA1UEAwweSEFSSUNBIENsaWVudCBS +U0EgUm9vdCBDQSAyMDIxMB4XDTI1MDEwMzExMTMwOFoXDTM5MTIzMTExMTMwN1ow +YzELMAkGA1UEBhMCR1IxNzA1BgNVBAoMLkhlbGxlbmljIEFjYWRlbWljIGFuZCBS +ZXNlYXJjaCBJbnN0aXR1dGlvbnMgQ0ExGzAZBgNVBAMMEkdFQU5UIFMvTUlNRSBS +U0EgMTCCAaIwDQYJKoZIhvcNAQEBBQADggGPADCCAYoCggGBAKu4bq/+byKjHo25 +Xz32YBmO+Wrkmc+UmfcdXSCI7yawwU9JSMEHAAKAASaJpLr9JAyt+tlB/rn/Sazn +SwY4ipBIffR0D5k/ndfiI553dWgI4i/tkOGlNej/7JyE2CS9kTlOOs6pg5HaDpwq +jAhCkje+IByg5gKWH6lzvMJo5jQOtsGB2q6e5cYKwa9LJOAcR8iquds9LFssbHSM +uVdSuTjpAjcGLqWfW++C0YXpWD+UonjQ6lNEuiKUDmrFc+SEtLw56lYtp4uuxm4L +W/HQSsx+oGwMBqaR6HhBQ3LydONjsbcbegRqJZFJoLsnwIHorEag44UIvjXzYJAx +/NTiwVdHldO7cEvWscDbyQLR9koBoliq2HrgYFQs7NQxU+7MLNSh8i6znWVNISUE +g36M//I8BZl4VqD70ELlhKKN7rx+i7BwKOd2gxdWgFJhkPyQu9o+82R9epXiRblo +/rdkyv+2BFR7VpbgPUzncdi8/0h4dP/qQFYnA+Df0FFj7gYczwIDAQABo4IBZjCC +AWIwEgYDVR0TAQH/BAgwBgEB/wIBADAfBgNVHSMEGDAWgBSg1gc9XiT3e6BELiRS +DRmqKwSRpzBQBggrBgEFBQcBAQREMEIwQAYIKwYBBQUHMAKGNGh0dHA6Ly9jcnQu +aGFyaWNhLmdyL0hBUklDQS1DbGllbnQtUm9vdC0yMDIxLVJTQS5jZXIwRAYDVR0g +BD0wOzA5BgRVHSAAMDEwLwYIKwYBBQUHAgEWI2h0dHA6Ly9yZXBvLmhhcmljYS5n +ci9kb2N1bWVudHMvQ1BTMB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEFBQcDBDBF +BgNVHR8EPjA8MDqgOKA2hjRodHRwOi8vY3JsLmhhcmljYS5nci9IQVJJQ0EtQ2xp +ZW50LVJvb3QtMjAyMS1SU0EuY3JsMB0GA1UdDgQWBBTrsi87/a4CzCpEBl0lzR0S +ImiwRzAOBgNVHQ8BAf8EBAMCAYYwDQYJKoZIhvcNAQELBQADggIBADveuEX23Dwr +kygKtsF7DmcTGmi8SE20jmJLe0TMT8Nws1NqppE0ACym1agtY1IjUFm5MWabG/Ic +vRTh8sB9cRZgDQMqZLNCLofqL4aj/dKBXH4bwH2MVdjNHBoGvZkyhRz/kBE+x1va +WXclhWQMOX5nVvRMfiEJiYotMP7KM88IaVZ9DkGJJEVftsnUWuvCWUtjagD6XWlq +LHjNl+LufiZ/h9lDvaWqG1/obfdStgofMc30RL+ES6gYKRwZpCA1coFzXV7Cnwx8 +toTl8bReqCNXexKzxlqAcRXPOmlKkJQuqRI297oNuMPnoNZCY+yLnxyd4kZuu0Xc +OTNTpVjM8bvg8ACqhSYanrNDi/zTiTk7gwm9GyH1X45fFNGNEFgpIaApjT2UELuk +DOmP18ZwC4EQeHawPJIqffMEmUJm6qbRPKGnNmcyygh4iZU3QbkRLLp3Z6QV3WoT 
+Eqyf5mL9qTGS6WJG65L8oaKw1Xh/bdGuVIDyBahpfP2c2pCd0UH6+x73Rrq9GFlO +ijVr2OQSvKhzETNG917SvcURCBhMnIQFUXqHQyIY60eH1po6WtNOq/1K5kpOG6Sq +1RVc02LEit48uK4tRMVUKekSOjruGXW38DmAriPcMHjI6VQbqjc0Sq1VPz76ee4F +M5uLviSUZHYqDDqMWa8LFImK9iiKI8E3 +-----END CERTIFICATE----- +Subject: C=GR,O=Hellenic Academic and Research Institutions CA,CN=HARICA S/MIME RSA +-----BEGIN CERTIFICATE----- +MIIGyjCCBLKgAwIBAgIQKU8NCxll7jtXyXGT7O5U0zANBgkqhkiG9w0BAQsFADBv +MQswCQYDVQQGEwJHUjE3MDUGA1UECgwuSGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJl +c2VhcmNoIEluc3RpdHV0aW9ucyBDQTEnMCUGA1UEAwweSEFSSUNBIENsaWVudCBS +U0EgUm9vdCBDQSAyMDIxMB4XDTIxMDMxOTA5MzczOFoXDTM2MDMxNTA5MzczN1ow +YjELMAkGA1UEBhMCR1IxNzA1BgNVBAoMLkhlbGxlbmljIEFjYWRlbWljIGFuZCBS +ZXNlYXJjaCBJbnN0aXR1dGlvbnMgQ0ExGjAYBgNVBAMMEUhBUklDQSBTL01JTUUg +UlNBMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA5TUrmNU21Y8gpvrx +XCCOJS54uCyv6H6HQ1NCCz2FCg04VaNEuRYFGSXbxC88u/Q3M/MfegErs6MlLg5j +gaAD7UYRpFMs/rS6Z6Pr45LDaWHTsS7gjhYaq47+iO0+17eTc7VWeTFzZji3ocFA +CiXc+tEDRXSGal+qu4s6/Zmexmtp3P8QodrLgy6mAXO7necfkCoyBVjUcixIvPeR +1NeFWfy2io7QU+Wf4Wm18259W+oJgxH0mgfFKgGrz/c2BokxzdXFvuKggVWEh/p0 +jy8uRQOGcMRT2fyUh5LM6zRBGObE8xIiRfforJF2CGSu1B0wj0jCmFG8Z2UN55xk +EH+HKpQlm+S0mvez7YYUqMobN1RjTWHSZQoX2nuYk0no/cxbUl/AVXE0fq3DkNv4 +fdTGfLkTJ42qdyAIpjEjkLgrb4C4CT2IxPuNCLCE25SnVmydA7CuMmbwfljWwkE4 +JyO23QL90baDIcEg/NYp/IueW9qu8YwCX5ipJhflzqnjONFKDlC93FnoQ/i/ALIk +MOhjCrHePX3/F7OWXWAoIRdbqoCNvN4KnJfcFJCcPDOGY7qds5cMcII3uKhhsRCQ +Jg2KKGvVdwp1oBIibuVM+cIZi3QucK8TfssODws1WNce5RV+Gniv5U4dhS+5nPvC +gQr6a8wL4ha8hc8ovXEhrrBHtOMCAwEAAaOCAW0wggFpMBIGA1UdEwEB/wQIMAYB +Af8CAQAwHwYDVR0jBBgwFoAUoNYHPV4k93ugRC4kUg0ZqisEkacwVwYIKwYBBQUH +AQEESzBJMEcGCCsGAQUFBzAChjtodHRwOi8vcmVwby5oYXJpY2EuZ3IvY2VydHMv +SEFSSUNBLUNsaWVudC1Sb290LTIwMjEtUlNBLmNlcjBEBgNVHSAEPTA7MDkGBFUd +IAAwMTAvBggrBgEFBQcCARYjaHR0cDovL3JlcG8uaGFyaWNhLmdyL2RvY3VtZW50 +cy9DUFMwHQYDVR0lBBYwFAYIKwYBBQUHAwIGCCsGAQUFBwMEMEUGA1UdHwQ+MDww +OqA4oDaGNGh0dHA6Ly9jcmwuaGFyaWNhLmdyL0hBUklDQS1DbGllbnQtUm9vdC0y 
+MDIxLVJTQS5jcmwwHQYDVR0OBBYEFJX2FFz9LqSMQ6C3jgW0Orbsgg2fMA4GA1Ud +DwEB/wQEAwIBhjANBgkqhkiG9w0BAQsFAAOCAgEAL2XgP79Wk/ijkQOI55UihqPR +m68QXb1IDKYCmQiJFkqNQOf/Bt10RjtASV2kjDEQoI4cnmwB++Pd8A9kyFdWPwJ2 +bX68XvFvHX6FzejNwJ5b6wIiz1W3QlHZwhTSoV0KHh4H4gzD8N9X4HwfESVYRkQF +3tTg1VnZJI2ggCg5201X3BI6YLDc4GltF5EORVzT5xPpMiNHxDivoP8tEdijw5Jh +kpCDG8BbpbfXQ1HjPRawQ4w6w1aXgyhajLAhjZdLOfpmZO5EbYai46m/niK5mKTU +/RFupYhNsejfD5wm6JEZbisV5SXVPbmIXrTwvt66lGvgSb3X7waCFIVuwr71qc/q +YNyvXA4ALSchFU8xqWDu2StWrB3i3CzNR2DMz18yYpjeJehuuhsIZu+Ku4CML1O3 +nlKCXx3EbABVAJkXhbDqHnUw0kqPHh9enYcXdJ8zjLhHVBghdLfpOmp9ZtIOjquL +mi+OEwkhOv6bC4IXpY57c5bBzK9DCoffE1LG/JUfOSE0B6kuTVyXvFhfq7BK5LcJ +WieXlSD3COOcqBQ62eHKpRPEYOLdAHDdOq6MBHHMAVUkB/pAFgDj0twB+xP2FUUk +ycRBPSiBgQEnuI8QLrQXB3FDSXqh+I8jfUcVc7JvgvG4bLMjozgFB58A7gJ5YDce +4P4ckMdKzbAXPa5tY2Y= +-----END CERTIFICATE----- +Subject: C=GR,O=Hellenic Academic and Research Institutions CA,CN=HARICA TLS ECC Root CA 2021 +-----BEGIN CERTIFICATE----- +MIICVDCCAdugAwIBAgIQZ3SdjXfYO2rbIvT/WeK/zjAKBggqhkjOPQQDAzBsMQsw +CQYDVQQGEwJHUjE3MDUGA1UECgwuSGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJlc2Vh +cmNoIEluc3RpdHV0aW9ucyBDQTEkMCIGA1UEAwwbSEFSSUNBIFRMUyBFQ0MgUm9v +dCBDQSAyMDIxMB4XDTIxMDIxOTExMDExMFoXDTQ1MDIxMzExMDEwOVowbDELMAkG +A1UEBhMCR1IxNzA1BgNVBAoMLkhlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJj +aCBJbnN0aXR1dGlvbnMgQ0ExJDAiBgNVBAMMG0hBUklDQSBUTFMgRUNDIFJvb3Qg +Q0EgMjAyMTB2MBAGByqGSM49AgEGBSuBBAAiA2IABDgI/rGgltJ6rK9JOtDA4MM7 +KKrxcm1lAEeIhPyaJmuqS7psBAqIXhfyVYf8MLA04jRYVxqEU+kw2anylnTDUR9Y +STHMmE5gEYd103KUkE+bECUqqHgtvpBBWJAVcqeht6NCMEAwDwYDVR0TAQH/BAUw +AwEB/zAdBgNVHQ4EFgQUyRtTgRL+BNUW0aq8mm+3oJUZbsowDgYDVR0PAQH/BAQD +AgGGMAoGCCqGSM49BAMDA2cAMGQCMBHervjcToiwqfAircJRQO9gcS3ujwLEXQNw +SaSS6sUUiHCm0w2wqsosQJz76YJumgIwK0eaB8bRwoF8yguWGEEbo/QwCZ61IygN +nxS2PFOiTAZpffpskcYqSUXm7LcT4Tps +-----END CERTIFICATE----- +Subject: C=GR,O=Hellenic Academic and Research Institutions CA,CN=GEANT TLS ECC 1 +-----BEGIN CERTIFICATE----- +MIIDNzCCArygAwIBAgIQQv3c4SYWB+Gl5pNaQAFh3TAKBggqhkjOPQQDAzBsMQsw 
+CQYDVQQGEwJHUjE3MDUGA1UECgwuSGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJlc2Vh +cmNoIEluc3RpdHV0aW9ucyBDQTEkMCIGA1UEAwwbSEFSSUNBIFRMUyBFQ0MgUm9v +dCBDQSAyMDIxMB4XDTI1MDEwMzExMTQyMVoXDTM5MTIzMTExMTQyMFowYDELMAkG +A1UEBhMCR1IxNzA1BgNVBAoMLkhlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJj +aCBJbnN0aXR1dGlvbnMgQ0ExGDAWBgNVBAMMD0dFQU5UIFRMUyBFQ0MgMTB2MBAG +ByqGSM49AgEGBSuBBAAiA2IABANPWLwh0Za2UqtbLV7/qNRm78zsttgSuvhn73bU +GtxETsVOEZeMUfMjgHw8EwrsSJI9oj0CgZQFFSEY1NJfcxA/NJiOYJUKPsFbpOrY +dr0q4g+aBZsXWeh7bMCzx24g/aOCAS0wggEpMBIGA1UdEwEB/wQIMAYBAf8CAQAw +HwYDVR0jBBgwFoAUyRtTgRL+BNUW0aq8mm+3oJUZbsowTQYIKwYBBQUHAQEEQTA/ +MD0GCCsGAQUFBzAChjFodHRwOi8vY3J0LmhhcmljYS5nci9IQVJJQ0EtVExTLVJv +b3QtMjAyMS1FQ0MuY2VyMBEGA1UdIAQKMAgwBgYEVR0gADAdBgNVHSUEFjAUBggr +BgEFBQcDAgYIKwYBBQUHAwEwQgYDVR0fBDswOTA3oDWgM4YxaHR0cDovL2NybC5o +YXJpY2EuZ3IvSEFSSUNBLVRMUy1Sb290LTIwMjEtRUNDLmNybDAdBgNVHQ4EFgQU +6ZkGjRcfq/uWGlrIW15dXuzanI8wDgYDVR0PAQH/BAQDAgGGMAoGCCqGSM49BAMD +A2kAMGYCMQD2M1caaY2OwmthgmANUQg3LBLI0/2LiCdxa2zNq0G59wVzbjEk0cR/ +px52OegIwRACMQCk+iTmBlR6Xfv6igiiaFiPYfN2HfbcYLWbot5DZ2H1b4JVJV+V +rga7uu50SDG9hf4= +-----END CERTIFICATE----- +Subject: C=GR,O=Hellenic Academic and Research Institutions CA,CN=HARICA OV TLS ECC +-----BEGIN CERTIFICATE----- +MIIDcjCCAvigAwIBAgIQbIPKxKaS8zQphK9yBQyPDDAKBggqhkjOPQQDAzBsMQsw +CQYDVQQGEwJHUjE3MDUGA1UECgwuSGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJlc2Vh +cmNoIEluc3RpdHV0aW9ucyBDQTEkMCIGA1UEAwwbSEFSSUNBIFRMUyBFQ0MgUm9v +dCBDQSAyMDIxMB4XDTIxMDMxOTA5MzM1MloXDTM2MDMxNTA5MzM1MVowYjELMAkG +A1UEBhMCR1IxNzA1BgNVBAoMLkhlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJj +aCBJbnN0aXR1dGlvbnMgQ0ExGjAYBgNVBAMMEUhBUklDQSBPViBUTFMgRUNDMHYw +EAYHKoZIzj0CAQYFK4EEACIDYgAECLBRkImpVT10VjeoeRvNhOsxxQe+XVQO37r8 +PPKVlSWR/+78tD+FMEUTLGGjVUd4+KFCM2Yc2V3fZyoQIaxUzbhUYwIavuaF0V9l +t0cfPISZ1rcQEHn3yAr1ON1eoT3jo4IBZzCCAWMwEgYDVR0TAQH/BAgwBgEB/wIB +ADAfBgNVHSMEGDAWgBTJG1OBEv4E1RbRqryab7eglRluyjBUBggrBgEFBQcBAQRI +MEYwRAYIKwYBBQUHMAKGOGh0dHA6Ly9yZXBvLmhhcmljYS5nci9jZXJ0cy9IQVJJ 
+Q0EtVExTLVJvb3QtMjAyMS1FQ0MuY2VyMEQGA1UdIAQ9MDswOQYEVR0gADAxMC8G +CCsGAQUFBwIBFiNodHRwOi8vcmVwby5oYXJpY2EuZ3IvZG9jdW1lbnRzL0NQUzAd +BgNVHSUEFjAUBggrBgEFBQcDAgYIKwYBBQUHAwEwQgYDVR0fBDswOTA3oDWgM4Yx +aHR0cDovL2NybC5oYXJpY2EuZ3IvSEFSSUNBLVRMUy1Sb290LTIwMjEtRUNDLmNy +bDAdBgNVHQ4EFgQUvrSdrMbKJ79Ox9kcg/5aTh6XB58wDgYDVR0PAQH/BAQDAgGG +MAoGCCqGSM49BAMDA2gAMGUCMQCBJIThQHLwid4SHT+YoWXd7tEFwKf6OsIX+M4U +fh2/UAp8bCiB7D/lcAvFj9YPajcCME5DsmcLbYE7D44HlLoqVcr7RDdh84nG6Dsp +8+YS3BnKAIONAWeGq4jawr3lD667Mw== +-----END CERTIFICATE----- +Subject: C=GR,O=Hellenic Academic and Research Institutions CA,CN=HARICA TLS RSA Root CA 2021 +-----BEGIN CERTIFICATE----- +MIIFpDCCA4ygAwIBAgIQOcqTHO9D88aOk8f0ZIk4fjANBgkqhkiG9w0BAQsFADBs +MQswCQYDVQQGEwJHUjE3MDUGA1UECgwuSGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJl +c2VhcmNoIEluc3RpdHV0aW9ucyBDQTEkMCIGA1UEAwwbSEFSSUNBIFRMUyBSU0Eg +Um9vdCBDQSAyMDIxMB4XDTIxMDIxOTEwNTUzOFoXDTQ1MDIxMzEwNTUzN1owbDEL +MAkGA1UEBhMCR1IxNzA1BgNVBAoMLkhlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNl +YXJjaCBJbnN0aXR1dGlvbnMgQ0ExJDAiBgNVBAMMG0hBUklDQSBUTFMgUlNBIFJv +b3QgQ0EgMjAyMTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAIvC569l +mwVnlskNJLnQDmT8zuIkGCyEf3dRywQRNrhe7Wlxp57kJQmXZ8FHws+RFjZiPTgE +4VGC/6zStGndLuwRo0Xua2s7TL+MjaQenRG56Tj5eg4MmOIjHdFOY9TnuEFE+2uv +a9of08WRiFukiZLRgeaMOVig1mlDqa2YUlhu2wr7a89o+uOkXjpFc5gH6l8Cct4M +pbOfrqkdtx2z/IpZ525yZa31MJQjB/OCFks1mJxTuy/K5FrZx40d/JiZ+yykgmvw +Kh+OC19xXFyuQnspiYHLA6OZyoieC0AJQTPb5lh6/a6ZcMBaD9YThnEvdmn8kN3b +LW7R8pv1GmuebxWMevBLKKAiOIAkbDakO/IwkfN4E8/BPzWr8R0RI7VDIp4BkrcY +AuUR0YLbFQDMYTfBKnya4dC6s1BG7oKsnTH4+yPiAwBIcKMJJnkVU2DzOFytOOqB +AGMUuTNe3QvboEUHGjMJ+E20pwKmafTCWQWIZYVWrkvL4N48fS0ayOn7H6NhStYq +E613TBoYm5EPWNgGVMWX+Ko/IIqmhaZ39qb8HOLubpQzKoNQhArlT4b4UEV4AIHr +W2jjJo3Me1xR9BQsQL4aYB16cmEdH2MtiKrOokWQCPxrvrNQKlr9qEgYRtaQQJKQ +CoReaDH46+0N0x3GfZkYVVYnZS6NRcUk7M7jAgMBAAGjQjBAMA8GA1UdEwEB/wQF +MAMBAf8wHQYDVR0OBBYEFApII6ZgpJIKM+qTW8VX6iVNvRLuMA4GA1UdDwEB/wQE +AwIBhjANBgkqhkiG9w0BAQsFAAOCAgEAPpBIqm5iFSVmewzVjIuJndftTgfvnNAU 
+X15QvWiWkKQUEapobQk1OUAJ2vQJLDSle1mESSmXdMgHHkdt8s4cUCbjnj1AUz/3 +f5Z2EMVGpdAgS1D0NTsY9FVqQRtHBmg8uwkIYtlfVUKqrFOFrJVWNlar5AWMxaja +H6NpvVMPxP/cyuN+8kyIhkdGGvMA9YCRotxDQpSbIPDRzbLrLFPCU3hKTwSUQZqP +JzLB5UkZv/HywouoCjkxKLR9YjYsTewfM7Z+d21+UPCfDtcRj88YxeMn/ibvBZ3P +zzfF0HvaO7AWhAw6k9a+F9sPPg4ZeAnHqQJyIkv3N3a6dcSFA1pj1bF1BcK5vZSt +jBWZp5N99sXzqnTPBIWUmAD04vnKJGW/4GKvyMX6ssmeVkjaef2WdhW+o45WxLM0 +/L5H9MG0qPzVMIho7suuyWPEdr6sOBjhXlzPrjoiUevRi7PzKzMHVIf6tLITe7pT +BGIBnfHAT+7hOtSLIBD6Alfm78ELt5BGnBkpjNxvoEppaZS3JGWg/6w/zgH7IS79 +aPib8qXPMThcFarmlwDB31qlpzmq6YR/PFGoOtmUW4y/Twhx5duoXNTSpv4Ao8YW +xw/ogM4cKGR0GQjTQuPOAF1/sdwTsOEFy9EgqoZ0njnnkf3/W9b3raYvAwtt41dU +63ZTGI0RmLo= +-----END CERTIFICATE----- +Subject: C=GR,O=Hellenic Academic and Research Institutions CA,CN=GEANT TLS RSA 1 +-----BEGIN CERTIFICATE----- +MIIGBTCCA+2gAwIBAgIQFNV782kiKCGaVWf6kWUbIjANBgkqhkiG9w0BAQsFADBs +MQswCQYDVQQGEwJHUjE3MDUGA1UECgwuSGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJl +c2VhcmNoIEluc3RpdHV0aW9ucyBDQTEkMCIGA1UEAwwbSEFSSUNBIFRMUyBSU0Eg +Um9vdCBDQSAyMDIxMB4XDTI1MDEwMzExMTUwMFoXDTM5MTIzMTExMTQ1OVowYDEL +MAkGA1UEBhMCR1IxNzA1BgNVBAoMLkhlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNl +YXJjaCBJbnN0aXR1dGlvbnMgQ0ExGDAWBgNVBAMMD0dFQU5UIFRMUyBSU0EgMTCC +AaIwDQYJKoZIhvcNAQEBBQADggGPADCCAYoCggGBAKEEaZSzEzznAPk8IEa17GSG +yJzPTj4cwRY7/vcq2BPT5+IRGxQtaCdgLXIEl2cdPdIkj2eyakFmgMjAtyeju8V8 +dRayQCD/bWjJ7thDlowgLljQaXirxnYbT8bzRHAhCZqBakYgi5KWw9dANLyDHGpX +UdY259ab0lWEaFE5Uu6IzQSMJOAy4l/Twym8GUiy0qMDEBFSlm31C9BXpdHKKAlh +vIjMiKoDeTWl5vZaLB2MMRGY1yW2ftPgIP0/MkX1uFITlvHmmMTngxplH1nybEIJ +FiwHg1KiLk1TprcZgeO2gxE5Lz3wTFWrsUlAzrh5xWmscWkjNi/4BpeuiT5+NExF +czboLnXOfjuci/7bsnPi1/aZN/iKNbJRnngFoLaKVMmqCS7Xo34f+BITatryQZFE +u2oDKExQGlxDBCfYMLgLucX/onpLzUSgeQITNLx6i5tGGbUYH+9Dy3GI66L/5tPj +qzlOsydki8ZYGE5SBJeWCZ2IrhUe0WzZ2b6Zhk6JAQIDAQABo4IBLTCCASkwEgYD +VR0TAQH/BAgwBgEB/wIBADAfBgNVHSMEGDAWgBQKSCOmYKSSCjPqk1vFV+olTb0S +7jBNBggrBgEFBQcBAQRBMD8wPQYIKwYBBQUHMAKGMWh0dHA6Ly9jcnQuaGFyaWNh 
+LmdyL0hBUklDQS1UTFMtUm9vdC0yMDIxLVJTQS5jZXIwEQYDVR0gBAowCDAGBgRV +HSAAMB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEFBQcDATBCBgNVHR8EOzA5MDeg +NaAzhjFodHRwOi8vY3JsLmhhcmljYS5nci9IQVJJQ0EtVExTLVJvb3QtMjAyMS1S +U0EuY3JsMB0GA1UdDgQWBBSGAXI/jKlw4jEGUxbOAV9becg8OzAOBgNVHQ8BAf8E +BAMCAYYwDQYJKoZIhvcNAQELBQADggIBABkssjQzYrOo4GMsKegaChP16yNe6Sck +cWBymM455R2rMeuQ3zlxUNOEt+KUfgueOA2urp4j6TlPbs/XxpwuN3I1f09Luk5b ++ZgRXM7obE6ZLTerVQWKoTShyl34R2XlK8pEy7+67Ht4lcJzt+K6K5gEuoPSGQDP +ef+fUfmXrFcgBMcMbtfDb9dubFKNZZxo5nAXiqhFMOIyByag3H+tOTuH8zuId9pH +RDsUpAIHJ9/W2WBfLcKav7IKRlNBRD/sPBy903J9WHPKwl8kQSDA+aa7XCYk7bJt +Eyf+7GM9F5cZ7+YyknXqnv/rtQEkTKZdQo5Us18VFe9qqj94tXbLdk7PejJYNB4O +Zlli44Ld7rtqfFlUych7gIxFOmiyxMQQYrYmUi+74lEZvfoNhuref0CupuKpz6O3 +dLv6kO9T10uNdDBoBQTkge3UzHafTIe3R2o3ujXKUGPwyc9m7/FETyKLUCwSU/5O +AVOeBCU8QtkKKjM8AmbpKpe3pHWcyq3R7B3LmIALkMPTydyDfxen65IDqREbVq8N +xjhkJThUz40JqOlN6uqKqeDISj/IoucYwsqW24AlO7ZzNmohQmMi8ep23H4hBSh0 +GBTe2XvkuzaNf92syK8l2HzO+13GLCjzYLTPvXTO9UpK8DGyfGZOuamuwbAnbNpE +3RfjV9IaUQGJ +-----END CERTIFICATE----- +Subject: C=GR,O=Hellenic Academic and Research Institutions CA,CN=HARICA OV TLS RSA +-----BEGIN CERTIFICATE----- +MIIGwTCCBKmgAwIBAgIQHEYUQ2gTTV1QnQ3HytsfuDANBgkqhkiG9w0BAQsFADBs +MQswCQYDVQQGEwJHUjE3MDUGA1UECgwuSGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJl +c2VhcmNoIEluc3RpdHV0aW9ucyBDQTEkMCIGA1UEAwwbSEFSSUNBIFRMUyBSU0Eg +Um9vdCBDQSAyMDIxMB4XDTIxMDMxOTA5MzQxN1oXDTM2MDMxNTA5MzQxNlowYjEL +MAkGA1UEBhMCR1IxNzA1BgNVBAoMLkhlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNl +YXJjaCBJbnN0aXR1dGlvbnMgQ0ExGjAYBgNVBAMMEUhBUklDQSBPViBUTFMgUlNB +MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxBjrEt4NmTflA4DbZjPC +HIHeSKt4GSvZrb8Wr9K/xCe/US+tRc7lMvp0SJSxtfEbb9tNo5I99YYegbgT2JOo +8E/d7fblR/x25dHORtfe2xAO8liK8aswTmb69QXIP81zNnC7juFWgnfDZg8Loz7m +O8qcKeYS7pejngD4YeiVTf3j2jyJ5BI4Y24IWwcO9Er1hW282He04eQTa9z3Ta9K +00jG7foADE9q9ZOgPAiNV3tDzsaGC5HftgdmJurC/t+SD7wTLNSOGeCBf0g3jIhE +QW8FAZYp4OpytgVk8yEtos0izSIXqJn9AMG/HH21EO2tb6S09+8NZEFGh/+GoGuD 
+vDW2Nhw6FZm/yctI6nvJI4t2du39gyXql15w6ENucbM8nAyn3rcx6sHAjjgGZw98 +PRnaXXj3lMWvRSpWbBBrV0cj7eJaVFhDR+OG/2jPi8xqXaFaFGvInGW43HpQDAE5 +ATZ+8uywd1S8jA484TmyKr2XUx3OIwvpRxZDy0GvHWiqq03liS/d83vFVvZRq34H +AMKSeLpLt6XpucnwNWJyv5RvmzsTzKfL/XrwJ9tkXWWp+sqGuScZNZApKMUhS+7w +NKv84eX/YBKmtuQch6Xgbw0D8JjBk4my35qLw4wGhX8AM1gVErcotCrMwswiSTLy +XJxm/VX2p8xRWCgrS49G2eMCAwEAAaOCAWcwggFjMBIGA1UdEwEB/wQIMAYBAf8C +AQAwHwYDVR0jBBgwFoAUCkgjpmCkkgoz6pNbxVfqJU29Eu4wVAYIKwYBBQUHAQEE +SDBGMEQGCCsGAQUFBzAChjhodHRwOi8vcmVwby5oYXJpY2EuZ3IvY2VydHMvSEFS +SUNBLVRMUy1Sb290LTIwMjEtUlNBLmNlcjBEBgNVHSAEPTA7MDkGBFUdIAAwMTAv +BggrBgEFBQcCARYjaHR0cDovL3JlcG8uaGFyaWNhLmdyL2RvY3VtZW50cy9DUFMw +HQYDVR0lBBYwFAYIKwYBBQUHAwIGCCsGAQUFBwMBMEIGA1UdHwQ7MDkwN6A1oDOG +MWh0dHA6Ly9jcmwuaGFyaWNhLmdyL0hBUklDQS1UTFMtUm9vdC0yMDIxLVJTQS5j +cmwwHQYDVR0OBBYEFOCbPX01Y8XsnQdzqKeJIPRPhPujMA4GA1UdDwEB/wQEAwIB +hjANBgkqhkiG9w0BAQsFAAOCAgEAbd2n5s7VSvUO8NUXH11Ml5Pdh+W+AaWEJdwG +8qtnSX+/WL33OsJx1YN5jNjSoEWK4kswoEvrrMP7rg8w920kL5lzgoByQbP3jrJL +Pj4LnvuqIFj6lz9eMA1kgYqy5BvnepQB/smx58K/CNadvXhtxSU+PuIgoaoKexHM +9AWAMIIKFkE/SbYd8lR2mzp5rgeSD+rYExOxvpG/f713fDFRTH+SyqMREw34d2rD +dtSBK5GerrV1F7C//KGM016EWCz59kutui8qyZJNq7dw1BChbEo1ho9ekN5nQ8t/ +ckF7lJkrMoYRyZobJbQs45BfDMXyRFT4u4N1Z+2GyEvgryqlIQfQ3SUamELnQ6Ta +3oia4pLt/SYrRJOJ3I1EhlMgmi9dS/vCiXcDoMqConepk3/gDFtf8NatbQk2+vBW +mkTcSxlAktWFFu3iAN+0hOPQWDtXofUecFVLwNzAFMhVFw8yd9h8AuM3ThZPNSW1 +0IFer8+cEkvBy2VBbg2MtXn3Duu3NGJk9xf1e752048foDAfrRcuPTZdijtvRFJ+ +G1d/8t9mQqer39S8HONbR+1Zx6KjlFbTkq+Jaivcg6X1JapDjDG8PMiaPX8WE5Tt +jmZ8mSoDuXjqXaQ03VFrr+IweD985Ryq/eDM79dQc8SawzNwzLdNkmczMhLfE9ut +wIHJbMg= +-----END CERTIFICATE----- +Subject: C=DE,O=T-Systems Enterprise Services GmbH,OU=T-Systems Trust Center,CN=T-TeleSec GlobalRoot Class 2 +-----BEGIN CERTIFICATE----- +MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUx +KzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAd +BgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNl 
+YyBHbG9iYWxSb290IENsYXNzIDIwHhcNMDgxMDAxMTA0MDE0WhcNMzMxMDAxMjM1 +OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnBy +aXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50 +ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDIwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCqX9obX+hzkeXaXPSi5kfl82hVYAUd +AqSzm1nzHoqvNK38DcLZSBnuaY/JIPwhqgcZ7bBcrGXHX+0CfHt8LRvWurmAwhiC +FoT6ZrAIxlQjgeTNuUk/9k9uN0goOA/FvudocP05l03Sx5iRUKrERLMjfTlH6VJi +1hKTXrcxlkIF+3anHqP1wvzpesVsqXFP6st4vGCvx9702cu+fjOlbpSD8DT6Iavq +jnKgP6TeMFvvhk1qlVtDRKgQFRzlAVfFmPHmBiiRqiDFt1MmUUOyCxGVWOHAD3bZ +wI18gfNycJ5v/hqO2V81xrJvNHy+SE/iWjnX2J14np+GPgNeGYtEotXHAgMBAAGj +QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS/ +WSA2AHmgoCJrjNXyYdK4LMuCSjANBgkqhkiG9w0BAQsFAAOCAQEAMQOiYQsfdOhy +NsZt+U2e+iKo4YFWz827n+qrkRk4r6p8FU3ztqONpfSO9kSpp+ghla0+AGIWiPAC +uvxhI+YzmzB6azZie60EI4RYZeLbK4rnJVM3YlNfvNoBYimipidx5joifsFvHZVw +IEoHNN/q/xWA5brXethbdXwFeilHfkCoMRN3zUA7tFFHei4R40cR3p1m0IvVVGb6 +g1XqfMIpiRvpb7PO4gWEyS8+eIVibslfwXhjdFjASBgMmTnrpMwatXlajRWc2BQN +9noHV8cigwUtPJslJj0Ys6lDfMjIq2SPDqO/nBudMNva0Bkuqjzx+zOAduTNrRlP +BSeOE6Fuwg== +-----END CERTIFICATE----- +Subject: C=DE,O=Verein zur Foerderung eines Deutschen Forschungsnetzes e. 
V.,OU=DFN-PKI,CN=DFN-Verein Certification Authority 2 +-----BEGIN CERTIFICATE----- +MIIFEjCCA/qgAwIBAgIJAOML1fivJdmBMA0GCSqGSIb3DQEBCwUAMIGCMQswCQYD +VQQGEwJERTErMCkGA1UECgwiVC1TeXN0ZW1zIEVudGVycHJpc2UgU2VydmljZXMg +R21iSDEfMB0GA1UECwwWVC1TeXN0ZW1zIFRydXN0IENlbnRlcjElMCMGA1UEAwwc +VC1UZWxlU2VjIEdsb2JhbFJvb3QgQ2xhc3MgMjAeFw0xNjAyMjIxMzM4MjJaFw0z +MTAyMjIyMzU5NTlaMIGVMQswCQYDVQQGEwJERTFFMEMGA1UEChM8VmVyZWluIHp1 +ciBGb2VyZGVydW5nIGVpbmVzIERldXRzY2hlbiBGb3JzY2h1bmdzbmV0emVzIGUu +IFYuMRAwDgYDVQQLEwdERk4tUEtJMS0wKwYDVQQDEyRERk4tVmVyZWluIENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5IDIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK +AoIBAQDLYNf/ZqFBzdL6h5eKc6uZTepnOVqhYIBHFU6MlbLlz87TV0uNzvhWbBVV +dgfqRv3IA0VjPnDUq1SAsSOcvjcoqQn/BV0YD8SYmTezIPZmeBeHwp0OzEoy5xad +rg6NKXkHACBU3BVfSpbXeLY008F0tZ3pv8B3Teq9WQfgWi9sPKUA3DW9ZQ2PfzJt +8lpqS2IB7qw4NFlFNkkF2njKam1bwIFrEczSPKiL+HEayjvigN0WtGd6izbqTpEp +PbNRXK2oDL6dNOPRDReDdcQ5HrCUCxLx1WmOJfS4PSu/wI7DHjulv1UQqyquF5de +M87I8/QJB+MChjFGawHFEAwRx1npAgMBAAGjggF0MIIBcDAOBgNVHQ8BAf8EBAMC +AQYwHQYDVR0OBBYEFJPj2DIm2tXxSqWRSuDqS+KiDM/hMB8GA1UdIwQYMBaAFL9Z +IDYAeaCgImuM1fJh0rgsy4JKMBIGA1UdEwEB/wQIMAYBAf8CAQIwMwYDVR0gBCww +KjAPBg0rBgEEAYGtIYIsAQEEMA0GCysGAQQBga0hgiweMAgGBmeBDAECAjBMBgNV +HR8ERTBDMEGgP6A9hjtodHRwOi8vcGtpMDMzNi50ZWxlc2VjLmRlL3JsL1RlbGVT +ZWNfR2xvYmFsUm9vdF9DbGFzc18yLmNybDCBhgYIKwYBBQUHAQEEejB4MCwGCCsG +AQUFBzABhiBodHRwOi8vb2NzcDAzMzYudGVsZXNlYy5kZS9vY3NwcjBIBggrBgEF +BQcwAoY8aHR0cDovL3BraTAzMzYudGVsZXNlYy5kZS9jcnQvVGVsZVNlY19HbG9i +YWxSb290X0NsYXNzXzIuY2VyMA0GCSqGSIb3DQEBCwUAA4IBAQCHC/8+AptlyFYt +1juamItxT9q6Kaoh+UYu9bKkD64ROHk4sw50unZdnugYgpZi20wz6N35at8yvSxM +R2BVf+d0a7Qsg9h5a7a3TVALZge17bOXrerufzDmmf0i4nJNPoRb7vnPmep/11I5 +LqyYAER+aTu/de7QCzsazeX3DyJsR4T2pUeg/dAaNH2t0j13s+70103/w+jlkk9Z +PpBHEEqwhVjAb3/4ru0IQp4e1N8ULk2PvJ6Uw+ft9hj4PEnnJqinNtgs3iLNi4LY +2XjiVRKjO4dEthEL1QxSr2mMDwbf0KJTi1eYe8/9ByT0/L3D/UqSApcb8re2z2WK +GqK1chk5 +-----END CERTIFICATE----- +Subject: C=DE,O=Verein zur Foerderung eines Deutschen Forschungsnetzes e. 
V.,OU=DFN-PKI,CN=DFN-Verein Global Issuing CA +-----BEGIN CERTIFICATE----- +MIIFrDCCBJSgAwIBAgIHG2O60B4sPTANBgkqhkiG9w0BAQsFADCBlTELMAkGA1UE +BhMCREUxRTBDBgNVBAoTPFZlcmVpbiB6dXIgRm9lcmRlcnVuZyBlaW5lcyBEZXV0 +c2NoZW4gRm9yc2NodW5nc25ldHplcyBlLiBWLjEQMA4GA1UECxMHREZOLVBLSTEt +MCsGA1UEAxMkREZOLVZlcmVpbiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAyMB4X +DTE2MDUyNDExMzg0MFoXDTMxMDIyMjIzNTk1OVowgY0xCzAJBgNVBAYTAkRFMUUw +QwYDVQQKDDxWZXJlaW4genVyIEZvZXJkZXJ1bmcgZWluZXMgRGV1dHNjaGVuIEZv +cnNjaHVuZ3NuZXR6ZXMgZS4gVi4xEDAOBgNVBAsMB0RGTi1QS0kxJTAjBgNVBAMM +HERGTi1WZXJlaW4gR2xvYmFsIElzc3VpbmcgQ0EwggEiMA0GCSqGSIb3DQEBAQUA +A4IBDwAwggEKAoIBAQCdO3kcR94fhsvGadcQnjnX2aIw23IcBX8pX0to8a0Z1kzh +axuxC3+hq+B7i4vYLc5uiDoQ7lflHn8EUTbrunBtY6C+li5A4dGDTGY9HGRp5Zuk +rXKuaDlRh3nMF9OuL11jcUs5eutCp5eQaQW/kP+kQHC9A+e/nhiIH5+ZiE0OR41I +X2WZENLZKkntwbktHZ8SyxXTP38eVC86rpNXp354ytVK4hrl7UF9U1/Isyr1ijCs +7RcFJD+2oAsH/U0amgNSoDac3iSHZeTn+seWcyQUzdDoG2ieGFmudn730Qp4PIdL +sDfPU8o6OBDzy0dtjGQ9PFpFSrrKgHy48+enTEzNAgMBAAGjggIFMIICATASBgNV +HRMBAf8ECDAGAQH/AgEBMA4GA1UdDwEB/wQEAwIBBjApBgNVHSAEIjAgMA0GCysG +AQQBga0hgiweMA8GDSsGAQQBga0hgiwBAQQwHQYDVR0OBBYEFGs6mIv58lOJ2uCt +sjIeCR/oqjt0MB8GA1UdIwQYMBaAFJPj2DIm2tXxSqWRSuDqS+KiDM/hMIGPBgNV +HR8EgYcwgYQwQKA+oDyGOmh0dHA6Ly9jZHAxLnBjYS5kZm4uZGUvZ2xvYmFsLXJv +b3QtZzItY2EvcHViL2NybC9jYWNybC5jcmwwQKA+oDyGOmh0dHA6Ly9jZHAyLnBj +YS5kZm4uZGUvZ2xvYmFsLXJvb3QtZzItY2EvcHViL2NybC9jYWNybC5jcmwwgd0G +CCsGAQUFBwEBBIHQMIHNMDMGCCsGAQUFBzABhidodHRwOi8vb2NzcC5wY2EuZGZu +LmRlL09DU1AtU2VydmVyL09DU1AwSgYIKwYBBQUHMAKGPmh0dHA6Ly9jZHAxLnBj +YS5kZm4uZGUvZ2xvYmFsLXJvb3QtZzItY2EvcHViL2NhY2VydC9jYWNlcnQuY3J0 +MEoGCCsGAQUFBzAChj5odHRwOi8vY2RwMi5wY2EuZGZuLmRlL2dsb2JhbC1yb290 +LWcyLWNhL3B1Yi9jYWNlcnQvY2FjZXJ0LmNydDANBgkqhkiG9w0BAQsFAAOCAQEA +gXhFpE6kfw5V8Amxaj54zGg1qRzzlZ4/8/jfazh3iSyNta0+x/KUzaAGrrrMqLGt +Mwi2JIZiNkx4blDw1W5gjU9SMUOXRnXwYuRuZlHBQjFnUOVJ5zkey5/KhkjeCBT/ +FUsrZpugOJ8Azv2n69F/Vy3ITF/cEBGXPpYEAlyEqCk5bJT8EJIGe57u2Ea0G7UD +DDjZ3LCpP3EGC7IDBzPCjUhjJSU8entXbveKBTjvuKCuL/TbB9VbhBjBqbhLzmyQ 
+GoLkuT36d/HSHzMCv1PndvncJiVBby+mG/qkE5D6fH7ZC2Bd7L/KQaBh+xFJKdio +LXUV2EoY6hbvVTQiGhONBg== +-----END CERTIFICATE----- +Subject: C=DE,ST=Bayern,L=Muenchen,O=Fraunhofer,OU=Fraunhofer Corporate PKI,CN=Fraunhofer User CA - G02 +-----BEGIN CERTIFICATE----- +MIIFqzCCBJOgAwIBAgIHG2O6xotSQjANBgkqhkiG9w0BAQsFADCBlTELMAkGA1UE +BhMCREUxRTBDBgNVBAoTPFZlcmVpbiB6dXIgRm9lcmRlcnVuZyBlaW5lcyBEZXV0 +c2NoZW4gRm9yc2NodW5nc25ldHplcyBlLiBWLjEQMA4GA1UECxMHREZOLVBLSTEt +MCsGA1UEAxMkREZOLVZlcmVpbiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAyMB4X +DTE2MDUyNDExMzgzMFoXDTMxMDIyMjIzNTk1OVowgYwxCzAJBgNVBAYTAkRFMQ8w +DQYDVQQIDAZCYXllcm4xETAPBgNVBAcMCE11ZW5jaGVuMRMwEQYDVQQKDApGcmF1 +bmhvZmVyMSEwHwYDVQQLDBhGcmF1bmhvZmVyIENvcnBvcmF0ZSBQS0kxITAfBgNV +BAMMGEZyYXVuaG9mZXIgVXNlciBDQSAtIEcwMjCCASIwDQYJKoZIhvcNAQEBBQAD +ggEPADCCAQoCggEBAL1FxEM9lv2hli0ZvDCDNvVn2mIcIJ4cvKUQ30l5ZARkKEtY +KlqSsrof2WmWFdxx1DPwFtp38QgbsbOPKSDoYiWw+ycW403f1ZJAvn6c5N07oT1A +j3xzYG/QxgY+qkLhKJnV0pr7afC+VIFB/wmNFhvlyXpAVy07NOikiRFlBzw/s4/H +HdHTL2VKEjCnfD6rzKtTN+D+24UYXiMvNzmPyWQMQhIxuDMPTwfoqBFL7leo334z +gHm78leaLK52DTzpHvlNHP4DnTRa2ipVpyj3yQWcOBw+75oVE71AbyFPzw1a0J9P +wisZljQ0eTb6/tkF81W5f6UD7NN4Ie2gxEvZkysCAwEAAaOCAgUwggIBMBIGA1Ud +EwEB/wQIMAYBAf8CAQEwDgYDVR0PAQH/BAQDAgEGMCkGA1UdIAQiMCAwDQYLKwYB +BAGBrSGCLB4wDwYNKwYBBAGBrSGCLAEBBDAdBgNVHQ4EFgQUoyb1mk/b2jvhtaru +WIvQ/P+1bVEwHwYDVR0jBBgwFoAUk+PYMiba1fFKpZFK4OpL4qIMz+EwgY8GA1Ud +HwSBhzCBhDBAoD6gPIY6aHR0cDovL2NkcDEucGNhLmRmbi5kZS9nbG9iYWwtcm9v +dC1nMi1jYS9wdWIvY3JsL2NhY3JsLmNybDBAoD6gPIY6aHR0cDovL2NkcDIucGNh +LmRmbi5kZS9nbG9iYWwtcm9vdC1nMi1jYS9wdWIvY3JsL2NhY3JsLmNybDCB3QYI +KwYBBQUHAQEEgdAwgc0wMwYIKwYBBQUHMAGGJ2h0dHA6Ly9vY3NwLnBjYS5kZm4u +ZGUvT0NTUC1TZXJ2ZXIvT0NTUDBKBggrBgEFBQcwAoY+aHR0cDovL2NkcDEucGNh +LmRmbi5kZS9nbG9iYWwtcm9vdC1nMi1jYS9wdWIvY2FjZXJ0L2NhY2VydC5jcnQw +SgYIKwYBBQUHMAKGPmh0dHA6Ly9jZHAyLnBjYS5kZm4uZGUvZ2xvYmFsLXJvb3Qt +ZzItY2EvcHViL2NhY2VydC9jYWNlcnQuY3J0MA0GCSqGSIb3DQEBCwUAA4IBAQDE +klJ5thEqcbIyroBrhnWl00oAUXYuYQqXDpBTCzF08aBk41FyLZXH54w7NUecdTZp 
+aq0SgueogbSv9viT8EzQ5Bt6bapOl5pVjEb6yBrV0Y3d5Vb3h8WA+/E+DRz/uV5M +gKdVHdazimuXK1HHf0UHSFiEkgiY3xSQr6CRH7UzzivWj9s6B2GUuP4zzfKrgAxQ +N2IaEUtvWiwl5Mtv55p6wmnBOhCnCtj1Ge5Du7jR8o2xAh//fZ4tz5UQ7XXVoeu5 +iueZp3qNb0cTtiGGPJxVtFmoaFNltp+zI/y52hF9XgjC+mJBVYYahmacW3wMpIAO +R0lstmlDfiKvecyE6S4v +-----END CERTIFICATE----- +Subject: C=US,ST=New Jersey,L=Jersey City,O=The USERTRUST Network,CN=USERTrust ECC Certification Authority +-----BEGIN CERTIFICATE----- +MIICjzCCAhWgAwIBAgIQXIuZxVqUxdJxVt7NiYDMJjAKBggqhkjOPQQDAzCBiDEL +MAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNl +eSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMT +JVVTRVJUcnVzdCBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAwMjAx +MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMCVVMxEzARBgNVBAgT +Ck5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVUaGUg +VVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBFQ0MgQ2VydGlm +aWNhdGlvbiBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQarFRaqflo +I+d61SRvU8Za2EurxtW20eZzca7dnNYMYf3boIkDuAUU7FfO7l0/4iGzzvfUinng +o4N+LZfQYcTxmdwlkWOrfzCjtHDix6EznPO/LlxTsV+zfTJ/ijTjeXmjQjBAMB0G +A1UdDgQWBBQ64QmG1M8ZwpZ2dEl23OA1xmNjmjAOBgNVHQ8BAf8EBAMCAQYwDwYD +VR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjA2Z6EWCNzklwBBHU6+4WMB +zzuqQhFkoJ2UOQIReVx7Hfpkue4WQrO/isIJxOzksU0CMQDpKmFHjFJKS04YcPbW +RNZu9YO6bVi9JNlWSOrvxKJGgYhqOkbRqZtNyWHa0V1Xahg= +-----END CERTIFICATE----- +Subject: C=NL,O=GEANT Vereniging,CN=GEANT OV ECC CA 4 +-----BEGIN CERTIFICATE----- +MIIDeTCCAv+gAwIBAgIRAOuOgRlxKfSvZO+BSi9QzukwCgYIKoZIzj0EAwMwgYgx +CzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpOZXcgSmVyc2V5MRQwEgYDVQQHEwtKZXJz +ZXkgQ2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMS4wLAYDVQQD +EyVVU0VSVHJ1c3QgRUNDIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTIwMDIx +ODAwMDAwMFoXDTMzMDUwMTIzNTk1OVowRDELMAkGA1UEBhMCTkwxGTAXBgNVBAoT +EEdFQU5UIFZlcmVuaWdpbmcxGjAYBgNVBAMTEUdFQU5UIE9WIEVDQyBDQSA0MFkw +EwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEXYkvGrfrMs2IwdI5+IwpEwPh+igW/BOW +etmOwP/ZIXC8fNeC3/ZYPAAMyRpFS0v3/c55FDTE2xbOUZ5zeVZYQqOCAYswggGH 
+MB8GA1UdIwQYMBaAFDrhCYbUzxnClnZ0SXbc4DXGY2OaMB0GA1UdDgQWBBTttKAz +ahsIkba9+kGSvZqrq2P0UzAOBgNVHQ8BAf8EBAMCAYYwEgYDVR0TAQH/BAgwBgEB +/wIBADAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwOAYDVR0gBDEwLzAt +BgRVHSAAMCUwIwYIKwYBBQUHAgEWF2h0dHBzOi8vc2VjdGlnby5jb20vQ1BTMFAG +A1UdHwRJMEcwRaBDoEGGP2h0dHA6Ly9jcmwudXNlcnRydXN0LmNvbS9VU0VSVHJ1 +c3RFQ0NDZXJ0aWZpY2F0aW9uQXV0aG9yaXR5LmNybDB2BggrBgEFBQcBAQRqMGgw +PwYIKwYBBQUHMAKGM2h0dHA6Ly9jcnQudXNlcnRydXN0LmNvbS9VU0VSVHJ1c3RF +Q0NBZGRUcnVzdENBLmNydDAlBggrBgEFBQcwAYYZaHR0cDovL29jc3AudXNlcnRy +dXN0LmNvbTAKBggqhkjOPQQDAwNoADBlAjAfs9nsM0qaJGVu6DpWVy4qojiOpwV1 +h/MWZ5GJxy6CKv3+RMB3STkaFh0+Hifbk24CMQDRf/ujXAQ1b4nFpZGaSIKldygc +dCDAxbAd9tlxcN/+J534CJDblzd/40REzGWwS5k= +-----END CERTIFICATE----- +Subject: C=NL,O=GEANT Vereniging,CN=GEANT Personal ECC CA 4 +-----BEGIN CERTIFICATE----- +MIIDfjCCAwSgAwIBAgIQdpAhff5d1sLEUCfF3NFaJjAKBggqhkjOPQQDAzCBiDEL +MAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNl +eSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMT +JVVTRVJUcnVzdCBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMjAwMjE4 +MDAwMDAwWhcNMzMwNTAxMjM1OTU5WjBKMQswCQYDVQQGEwJOTDEZMBcGA1UEChMQ +R0VBTlQgVmVyZW5pZ2luZzEgMB4GA1UEAxMXR0VBTlQgUGVyc29uYWwgRUNDIENB +IDQwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQYZ2cR4O5tfdskg2NNo346Noyo +enQjWhWQElTQQ+brmMxr5leW1AfpAJ9bsaUadsCJJnA5ycFITYjXfl/XvfPko4IB +izCCAYcwHwYDVR0jBBgwFoAUOuEJhtTPGcKWdnRJdtzgNcZjY5owHQYDVR0OBBYE +FKgtbYEyZI3msk+s/hHyZZmFE6luMA4GA1UdDwEB/wQEAwIBhjASBgNVHRMBAf8E +CDAGAQH/AgEAMB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEFBQcDBDA4BgNVHSAE +MTAvMC0GBFUdIAAwJTAjBggrBgEFBQcCARYXaHR0cHM6Ly9zZWN0aWdvLmNvbS9D +UFMwUAYDVR0fBEkwRzBFoEOgQYY/aHR0cDovL2NybC51c2VydHJ1c3QuY29tL1VT +RVJUcnVzdEVDQ0NlcnRpZmljYXRpb25BdXRob3JpdHkuY3JsMHYGCCsGAQUFBwEB +BGowaDA/BggrBgEFBQcwAoYzaHR0cDovL2NydC51c2VydHJ1c3QuY29tL1VTRVJU +cnVzdEVDQ0FkZFRydXN0Q0EuY3J0MCUGCCsGAQUFBzABhhlodHRwOi8vb2NzcC51 +c2VydHJ1c3QuY29tMAoGCCqGSM49BAMDA2gAMGUCMQCCX6P32oo7RiAIk1DIekZM 
+nFGZwY+xJoZ5HyChGc1Ncuupnh7Ezukr1EnL+MyAhNcCMD6DlSMWE5I++OBvznnX +1npjvntLcKogArAPjLglGGeymFt4U6pdy7/C0/miHCPuDA== +-----END CERTIFICATE----- +Subject: C=NL,O=GEANT Vereniging,CN=GEANT eScience Personal ECC CA 4 +-----BEGIN CERTIFICATE----- +MIIDiDCCAw6gAwIBAgIRAPFVZjGnycu/NlCaWpIPalkwCgYIKoZIzj0EAwMwgYgx +CzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpOZXcgSmVyc2V5MRQwEgYDVQQHEwtKZXJz +ZXkgQ2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMS4wLAYDVQQD +EyVVU0VSVHJ1c3QgRUNDIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTIwMDIx +ODAwMDAwMFoXDTMzMDUwMTIzNTk1OVowUzELMAkGA1UEBhMCTkwxGTAXBgNVBAoT +EEdFQU5UIFZlcmVuaWdpbmcxKTAnBgNVBAMTIEdFQU5UIGVTY2llbmNlIFBlcnNv +bmFsIEVDQyBDQSA0MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE3Oad56Xl15SY +nK3MmKcZUEu17k4jApQvPwTnGUqFxMbgDvAtxJtWqbKk8qvCOcZ/oCyrHkloS6Nf +AiTDTV5bZ6OCAYswggGHMB8GA1UdIwQYMBaAFDrhCYbUzxnClnZ0SXbc4DXGY2Oa +MB0GA1UdDgQWBBTt5lFk6mex24xLv7WAJ5g/2CMeZTAOBgNVHQ8BAf8EBAMCAYYw +EgYDVR0TAQH/BAgwBgEB/wIBADAdBgNVHSUEFjAUBggrBgEFBQcDAgYIKwYBBQUH +AwQwOAYDVR0gBDEwLzAtBgRVHSAAMCUwIwYIKwYBBQUHAgEWF2h0dHBzOi8vc2Vj +dGlnby5jb20vQ1BTMFAGA1UdHwRJMEcwRaBDoEGGP2h0dHA6Ly9jcmwudXNlcnRy +dXN0LmNvbS9VU0VSVHJ1c3RFQ0NDZXJ0aWZpY2F0aW9uQXV0aG9yaXR5LmNybDB2 +BggrBgEFBQcBAQRqMGgwPwYIKwYBBQUHMAKGM2h0dHA6Ly9jcnQudXNlcnRydXN0 +LmNvbS9VU0VSVHJ1c3RFQ0NBZGRUcnVzdENBLmNydDAlBggrBgEFBQcwAYYZaHR0 +cDovL29jc3AudXNlcnRydXN0LmNvbTAKBggqhkjOPQQDAwNoADBlAjA8KoDpeqid +slDXmfwHD7kr0XTY8rOdQBWMzT5uU7nPROEYLK00Dc9w/J4M8CGaLX8CMQDoaX4P +os4y0yfmvRAPaFZxyJi1ZHaZh+G0dX7ggOEyMHmT0P57T6TjdfcBr1G/J/M= +-----END CERTIFICATE----- +Subject: C=GB,ST=Greater Manchester,L=Salford,O=Sectigo Limited,CN=Sectigo ECC Organization Validation Secure Server CA +-----BEGIN CERTIFICATE----- +MIIDrjCCAzOgAwIBAgIQNb50Y4yz6d4oBXC3l4CzZzAKBggqhkjOPQQDAzCBiDEL +MAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNl +eSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMT +JVVTRVJUcnVzdCBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTgxMTAy +MDAwMDAwWhcNMzAxMjMxMjM1OTU5WjCBlTELMAkGA1UEBhMCR0IxGzAZBgNVBAgT 
+EkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEYMBYGA1UEChMP +U2VjdGlnbyBMaW1pdGVkMT0wOwYDVQQDEzRTZWN0aWdvIEVDQyBPcmdhbml6YXRp +b24gVmFsaWRhdGlvbiBTZWN1cmUgU2VydmVyIENBMFkwEwYHKoZIzj0CAQYIKoZI +zj0DAQcDQgAEnI5cCmFvoVij0NXO+vxE+f+6Bh57FhpyH0LTCrJmzfsPSXIhTSex +r92HOlz+aHqoGE0vSe/CSwLFoWcZ8W1jOaOCAW4wggFqMB8GA1UdIwQYMBaAFDrh +CYbUzxnClnZ0SXbc4DXGY2OaMB0GA1UdDgQWBBRNSu/ERrMSrU9OmrFZ4lGrCBB4 +CDAOBgNVHQ8BAf8EBAMCAYYwEgYDVR0TAQH/BAgwBgEB/wIBADAdBgNVHSUEFjAU +BggrBgEFBQcDAQYIKwYBBQUHAwIwGwYDVR0gBBQwEjAGBgRVHSAAMAgGBmeBDAEC +AjBQBgNVHR8ESTBHMEWgQ6BBhj9odHRwOi8vY3JsLnVzZXJ0cnVzdC5jb20vVVNF +UlRydXN0RUNDQ2VydGlmaWNhdGlvbkF1dGhvcml0eS5jcmwwdgYIKwYBBQUHAQEE +ajBoMD8GCCsGAQUFBzAChjNodHRwOi8vY3J0LnVzZXJ0cnVzdC5jb20vVVNFUlRy +dXN0RUNDQWRkVHJ1c3RDQS5jcnQwJQYIKwYBBQUHMAGGGWh0dHA6Ly9vY3NwLnVz +ZXJ0cnVzdC5jb20wCgYIKoZIzj0EAwMDaQAwZgIxAOk//uo7i/MoeKdcyeqvjOXs +BJFGLI+1i0d+Tty7zEnn2w4DNS21TK8wmY3Kjm3EmQIxAPI1qHM/I+OS+hx0OZhG +fDoNifTe/GxgWZ1gOYQKzn6lwP0yGKlrP+7vrVC8IczJ4A== +-----END CERTIFICATE----- +Subject: C=US,ST=New Jersey,L=Jersey City,O=The USERTRUST Network,CN=USERTrust RSA Certification Authority +-----BEGIN CERTIFICATE----- +MIIF3jCCA8agAwIBAgIQAf1tMPyjylGoG7xkDjUDLTANBgkqhkiG9w0BAQwFADCB +iDELMAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0pl +cnNleSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNV +BAMTJVVTRVJUcnVzdCBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAw +MjAxMDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMCVVMxEzARBgNV +BAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVU +aGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBSU0EgQ2Vy +dGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK +AoICAQCAEmUXNg7D2wiz0KxXDXbtzSfTTK1Qg2HiqiBNCS1kCdzOiZ/MPans9s/B +3PHTsdZ7NygRK0faOca8Ohm0X6a9fZ2jY0K2dvKpOyuR+OJv0OwWIJAJPuLodMkY +tJHUYmTbf6MG8YgYapAiPLz+E/CHFHv25B+O1ORRxhFnRghRy4YUVD+8M/5+bJz/ +Fp0YvVGONaanZshyZ9shZrHUm3gDwFA66Mzw3LyeTP6vBZY1H1dat//O+T23LLb2 +VN3I5xI6Ta5MirdcmrS3ID3KfyI0rn47aGYBROcBTkZTmzNg95S+UzeQc0PzMsNT 
+79uq/nROacdrjGCT3sTHDN/hMq7MkztReJVni+49Vv4M0GkPGw/zJSZrM233bkf6 +c0Plfg6lZrEpfDKEY1WJxA3Bk1QwGROs0303p+tdOmw1XNtB1xLaqUkL39iAigmT +Yo61Zs8liM2EuLE/pDkP2QKe6xJMlXzzawWpXhaDzLhn4ugTncxbgtNMs+1b/97l +c6wjOy0AvzVVdAlJ2ElYGn+SNuZRkg7zJn0cTRe8yexDJtC/QV9AqURE9JnnV4ee +UB9XVKg+/XRjL7FQZQnmWEIuQxpMtPAlR1n6BB6T1CZGSlCBst6+eLf8ZxXhyVeE +Hg9j1uliutZfVS7qXMYoCAQlObgOK6nyTJccBz8NUvXt7y+CDwIDAQABo0IwQDAd +BgNVHQ4EFgQUU3m/WqorSs9UgOHYm8Cd8rIDZsswDgYDVR0PAQH/BAQDAgEGMA8G +A1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAFzUfA3P9wF9QZllDHPF +Up/L+M+ZBn8b2kMVn54CVVeWFPFSPCeHlCjtHzoBN6J2/FNQwISbxmtOuowhT6KO +VWKR82kV2LyI48SqC/3vqOlLVSoGIG1VeCkZ7l8wXEskEVX/JJpuXior7gtNn3/3 +ATiUFJVDBwn7YKnuHKsSjKCaXqeYalltiz8I+8jRRa8YFWSQEg9zKC7F4iRO/Fjs +8PRF/iKz6y+O0tlFYQXBl2+odnKPi4w2r78NBc5xjeambx9spnFixdjQg3IM8WcR +iQycE0xyNN+81XHfqnHd4blsjDwSXWXavVcStkNr/+XeTWYRUc+ZruwXtuhxkYze +Sf7dNXGiFSeUHM9h4ya7b6NnJSFd5t0dCy5oGzuCr+yDZ4XUmFF0sbmZgIn/f3gZ +XHlKYC6SQK5MNyosycdiyA5d9zZbyuAlJQG03RoHnHcAP9Dc1ew91Pq7P8yF1m9/ +qS3fuQL39ZeatTXaw2ewh0qpKJ4jjv9cJ2vhsE/zB+4ALtRZh8tSQZXq9EfX7mRB +VXyNWQKV3WKdwrnuWih0hKWbt5DHDAff9Yk2dDLWKMGwsAvgnEzDHNb842m1R0aB +L6KCq9NjRHDEjf8tM7qtj3u1cIiuPhnPQCjY/MiQu12ZIvVS5ljFH4gxQ+6IHdfG +jjxDah2nGN59PRbxYvnKkKj9 +-----END CERTIFICATE----- +Subject: C=NL,O=GEANT Vereniging,CN=GEANT OV RSA CA 4 +-----BEGIN CERTIFICATE----- +MIIG5TCCBM2gAwIBAgIRANpDvROb0li7TdYcrMTz2+AwDQYJKoZIhvcNAQEMBQAw +gYgxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpOZXcgSmVyc2V5MRQwEgYDVQQHEwtK +ZXJzZXkgQ2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMS4wLAYD +VQQDEyVVU0VSVHJ1c3QgUlNBIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTIw +MDIxODAwMDAwMFoXDTMzMDUwMTIzNTk1OVowRDELMAkGA1UEBhMCTkwxGTAXBgNV +BAoTEEdFQU5UIFZlcmVuaWdpbmcxGjAYBgNVBAMTEUdFQU5UIE9WIFJTQSBDQSA0 +MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEApYhi1aEiPsg9ZKRMAw9Q +r8Mthsr6R20VSfFeh7TgwtLQi6RSRLOh4or4EMG/1th8lijv7xnBMVZkTysFiPmT +PiLOfvz+QwO1NwjvgY+Jrs7fSoVA/TQkXzcxu4Tl3WHi+qJmKLJVu/JOuHud6mOp +LWkIbhODSzOxANJ24IGPx9h4OXDyy6/342eE6UPXCtJ8AzeumTG6Dfv5KVx24lCF 
+TGUzHUB+j+g0lSKg/Sf1OzgCajJV9enmZ/84ydh48wPp6vbWf1H0O3Rd3LhpMSVn +TqFTLKZSbQeLcx/l9DOKZfBCC9ghWxsgTqW9gQ7v3T3aIfSaVC9rnwVxO0VjmDdP +FNbdoxnh0zYwf45nV1QQgpRwZJ93yWedhp4ch1a6Ajwqs+wv4mZzmBSjovtV0mKw +d+CQbSToalEUP4QeJq4Udz5WNmNMI4OYP6cgrnlJ50aa0DZPlJqrKQPGL69KQQz1 +2WgxvhCuVU70y6ZWAPopBa1ykbsttpLxADZre5cH573lIuLHdjx7NjpYIXRx2+QJ +URnX2qx37eZIxYXz8ggM+wXH6RDbU3V2o5DP67hXPHSAbA+p0orjAocpk2osxHKo +NSE3LCjNx8WVdxnXvuQ28tKdaK69knfm3bB7xpdfsNNTPH9ElcjscWZxpeZ5Iij8 +lyrCG1z0vSWtSBsgSnUyG/sCAwEAAaOCAYswggGHMB8GA1UdIwQYMBaAFFN5v1qq +K0rPVIDh2JvAnfKyA2bLMB0GA1UdDgQWBBRvHTVJEGwy+lmgnryK6B+VvnF6DDAO +BgNVHQ8BAf8EBAMCAYYwEgYDVR0TAQH/BAgwBgEB/wIBADAdBgNVHSUEFjAUBggr +BgEFBQcDAQYIKwYBBQUHAwIwOAYDVR0gBDEwLzAtBgRVHSAAMCUwIwYIKwYBBQUH +AgEWF2h0dHBzOi8vc2VjdGlnby5jb20vQ1BTMFAGA1UdHwRJMEcwRaBDoEGGP2h0 +dHA6Ly9jcmwudXNlcnRydXN0LmNvbS9VU0VSVHJ1c3RSU0FDZXJ0aWZpY2F0aW9u +QXV0aG9yaXR5LmNybDB2BggrBgEFBQcBAQRqMGgwPwYIKwYBBQUHMAKGM2h0dHA6 +Ly9jcnQudXNlcnRydXN0LmNvbS9VU0VSVHJ1c3RSU0FBZGRUcnVzdENBLmNydDAl +BggrBgEFBQcwAYYZaHR0cDovL29jc3AudXNlcnRydXN0LmNvbTANBgkqhkiG9w0B +AQwFAAOCAgEAUtlC3e0xj/1BMfPhdQhUXeLjb0xp8UE28kzWE5xDzGKbfGgnrT2R +lw5gLIx+/cNVrad//+MrpTppMlxq59AsXYZW3xRasrvkjGfNR3vt/1RAl8iI31lG +hIg6dfIX5N4esLkrQeN8HiyHKH6khm4966IkVVtnxz5CgUPqEYn4eQ+4eeESrWBh +AqXaiv7HRvpsdwLYekAhnrlGpioZ/CJIT2PTTxf+GHM6cuUnNqdUzfvrQgA8kt1/ +ASXx2od/M+c8nlJqrGz29lrJveJOSEMX0c/ts02WhsfMhkYa6XujUZLmvR1Eq08r +48/EZ4l+t5L4wt0DV8VaPbsEBF1EOFpz/YS2H6mSwcFaNJbnYqqJHIvm3PLJHkFm +EoLXRVrQXdCT+3wgBfgU6heCV5CYBz/YkrdWES7tiiT8sVUDqXmVlTsbiRNiyLs2 +bmEWWFUl76jViIJog5fongEqN3jLIGTG/mXrJT1UyymIcobnIGrbwwRVz/mpFQo0 +vBYIi1k2ThVh0Dx88BbF9YiP84dd8Fkn5wbE6FxXYJ287qfRTgmhePecPc73Yrzt +apdRcsKVGkOpaTIJP/l+lAHRLZxk/dUtyN95G++bOSQqnOCpVPabUGl2E/OEyFrp +Ipwgu2L/WJclvd6g+ZA/iWkLSMcpnFb+uX6QBqvD6+RNxul1FaB5iHY= +-----END CERTIFICATE----- +Subject: C=NL,O=GEANT Vereniging,CN=GEANT Personal CA 4 +-----BEGIN CERTIFICATE----- +MIIG5jCCBM6gAwIBAgIQMQJw1DW+mySa+FbQ4eKFSTANBgkqhkiG9w0BAQwFADCB 
+iDELMAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0pl +cnNleSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNV +BAMTJVVTRVJUcnVzdCBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMjAw +MjE4MDAwMDAwWhcNMzMwNTAxMjM1OTU5WjBGMQswCQYDVQQGEwJOTDEZMBcGA1UE +ChMQR0VBTlQgVmVyZW5pZ2luZzEcMBoGA1UEAxMTR0VBTlQgUGVyc29uYWwgQ0Eg +NDCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBALNK4iJeJ1vpBFsUBDUy +IBSutNIxQMbNUMAeoUTKr55KYX8tkN5imzNqLaRCypYBPP9wED2AaO6e8njkbjzJ +wLgPqDBkW9sG3kmi3GW6cF4Hwr5ysZqve/5EJDhV+9OhfTu/4dMnoR4Q41HcjMk9 +MzLOADAQ0awBZ/29r0d49AUmIKELNeqEqmnTN6fndL7x/2K0TLToZLxqS7sy/Jvi +0wEFr0CfdjcAsioh7KaD+Jizyb1aRKQzJ6Q20VEHX7UqWc1SkzTkbz6xj0S5ydBB +FQh0fNiy+qM/deVpK4HgmPSJrrpQZ+LlbHfWabmwoDPxF71QZVYiqrrAoUrGRJ+4 +7iLBiIg8miIYS7Hd2ppvAUt24CugMXUjETjQ+oYh09fNi5n/AvoER8UBvTHLxt+b +lL0bvL+2z2YiUWk+2Qtn+dD+JU5Z2y71qV7+cr+4YXjvGzF5bYsi8HiwflTb4Php +3y+k1twKtchdcq2QGc0eDG6Y01nRHUiyr8/PtMAsLHEPNZ2wzsA7fb8mftHiV20Z +FmYqknJ8AIOfwdTVA+E62JayOJ+sxadqcmFDorsz/mrPwGZ8+txr4xSuvVjg0dlv +0yuA+1YpBDIYNfL4bkX+IcZ1mTstL4Xw0f4N2iW3bBmnPnYmoYxMM8gflCiTgss7 +3nBvG2f7v1PD7BDGYNO4iD4vAgMBAAGjggGLMIIBhzAfBgNVHSMEGDAWgBRTeb9a +qitKz1SA4dibwJ3ysgNmyzAdBgNVHQ4EFgQUaQChxyFY+ODFGyCwCt2nUb8T2eQw +DgYDVR0PAQH/BAQDAgGGMBIGA1UdEwEB/wQIMAYBAf8CAQAwHQYDVR0lBBYwFAYI +KwYBBQUHAwIGCCsGAQUFBwMEMDgGA1UdIAQxMC8wLQYEVR0gADAlMCMGCCsGAQUF +BwIBFhdodHRwczovL3NlY3RpZ28uY29tL0NQUzBQBgNVHR8ESTBHMEWgQ6BBhj9o +dHRwOi8vY3JsLnVzZXJ0cnVzdC5jb20vVVNFUlRydXN0UlNBQ2VydGlmaWNhdGlv +bkF1dGhvcml0eS5jcmwwdgYIKwYBBQUHAQEEajBoMD8GCCsGAQUFBzAChjNodHRw +Oi8vY3J0LnVzZXJ0cnVzdC5jb20vVVNFUlRydXN0UlNBQWRkVHJ1c3RDQS5jcnQw +JQYIKwYBBQUHMAGGGWh0dHA6Ly9vY3NwLnVzZXJ0cnVzdC5jb20wDQYJKoZIhvcN +AQEMBQADggIBAAoFTnsNjx8TOQD9b+xixsPt7Req4wHMeNw/R5dddEPgQAQAYJZK +z5BEv1cjGbH7nbPH3AxrxhN6OVH40p6OLIo9MXSrrfMzGs7/P+FTCjwgNxFEtLQ1 +KC9NboA3asJcl7mIs3l8h9iAgEH1zLUvq2s+5n++NQmbzudDsTFDMapY3kX1TwyU +CTRzmItqcbsYIyg2MeIXWfRtqPqC5R4bufmpzA5BPINLX340Sp/CNQ9QZqw3Vkfy +HWwTo+vO9Gm2L6srNamJT6Lb+TeXZvl8UPL5a72O/pH0GgGHjt6z9QzPARnaRKsh 
+VWviNK6ST4WmZHllu3CJg0BXqx1vWyswawgvNeWt1qxITacYe9mSWTbNR2CftvTU +werruDSY2jMaZPoNqbjUpuG/blYwWzzvVerBUhviAahPXJF/9V48ybWPBq6qKOEo +kW+s3B4ad5sY96KlovEijaIQDip1HO0SD+rLNYaiBcr9MV2aK+DfbZ8w9BaNCQyF +EYwzxIKOVk3bYvzHRk5ihUDascmbk/bkiNl74c/KfuKQmJImaqWoWZR6jBcXcPV0 +WUIKz/nILTpFhGojZEQW77by3aezAi9jrEIUBHRG1LwzPbJc2V3SOzYyaJFQatzu +KZbN1Q9s9y/2x1QXtKwREY8jNgvx0iIfOK35gKgYJJcyDql4XfuEc2nV +-----END CERTIFICATE----- +Subject: C=NL,O=GEANT Vereniging,CN=GEANT eScience Personal CA 4 +-----BEGIN CERTIFICATE----- +MIIG8DCCBNigAwIBAgIRAKoycu7aGxmmN/byVir07vEwDQYJKoZIhvcNAQEMBQAw +gYgxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpOZXcgSmVyc2V5MRQwEgYDVQQHEwtK +ZXJzZXkgQ2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMS4wLAYD +VQQDEyVVU0VSVHJ1c3QgUlNBIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTIw +MDIxODAwMDAwMFoXDTMzMDUwMTIzNTk1OVowTzELMAkGA1UEBhMCTkwxGTAXBgNV +BAoTEEdFQU5UIFZlcmVuaWdpbmcxJTAjBgNVBAMTHEdFQU5UIGVTY2llbmNlIFBl +cnNvbmFsIENBIDQwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCVokk9 +uR1UAJRcNg1NTatsuxEwya7FpTRm5wcbSJqSLeUd5gjI2zHzwjaevHwCjN716blp +iyvEVr96aGw2y+FrVo/0yr0+/XoK8WdkB64j3KmOdUttTTXERY1T1vXxvkZhchHl +JvD/VVDIt/V/xg2iDpAa2N3SW0SNSyuAGqtVaCTM4eOEgVT1Nyg5VtQJVLujMrvv +cW6+1W1rLR2O05FxQtINVnrblTKWotAitEv8yMe9qQr1Fz4sdyVj2cBwEk/zQLok +JRCSw981Hoh5kwt4AVMSxjfeSRjx0Wt0C6ioRJ8WAfSWGNDZwnUWZ4nJrW2UWtdJ +Z59t2VByBR3e8MlVTVE4pirg3R5lCCIzPhNLQm25Rap/a4m3+e9A+Jnjgfi3XzSB +fd+gcMh1xBXK+YOOfbnZW4H8T1Ty9I/HKsUp/isLV8TJsFOLZCyuV14qVfi8jmRU +0wYIK4vQRj2M7VxCyH4MPn6lgnyecIieL0b4gFNWhE2waH2gfigvpQWH6bQuSWIh +l0PaRDWFavo1SNTFKdAVQDK7w3Iw3XzOhQnjHgU/idvqd5eaqa2G4VN1vV1pNirC +LXJccKK49zDSS1IFMX9iQC4YFxc/BbWRndI3smg05dcYn7Di73B0EzFFO6dyg0WD +r2N9G/8fgjv89biSARMNzjtg2XR2cLxdT8PAYwIDAQABo4IBizCCAYcwHwYDVR0j +BBgwFoAUU3m/WqorSs9UgOHYm8Cd8rIDZsswHQYDVR0OBBYEFLYvVVqwyWAZ788J +WtHxE51sjMkEMA4GA1UdDwEB/wQEAwIBhjASBgNVHRMBAf8ECDAGAQH/AgEAMB0G +A1UdJQQWMBQGCCsGAQUFBwMCBggrBgEFBQcDBDA4BgNVHSAEMTAvMC0GBFUdIAAw +JTAjBggrBgEFBQcCARYXaHR0cHM6Ly9zZWN0aWdvLmNvbS9DUFMwUAYDVR0fBEkw 
+RzBFoEOgQYY/aHR0cDovL2NybC51c2VydHJ1c3QuY29tL1VTRVJUcnVzdFJTQUNl +cnRpZmljYXRpb25BdXRob3JpdHkuY3JsMHYGCCsGAQUFBwEBBGowaDA/BggrBgEF +BQcwAoYzaHR0cDovL2NydC51c2VydHJ1c3QuY29tL1VTRVJUcnVzdFJTQUFkZFRy +dXN0Q0EuY3J0MCUGCCsGAQUFBzABhhlodHRwOi8vb2NzcC51c2VydHJ1c3QuY29t +MA0GCSqGSIb3DQEBDAUAA4ICAQB7IGXk1vGM4J73d8UD3f71UqLxom42Icu/IP0V +nrIRzbAmz+C++7Bir4mqkKWt89dd9ZlIK8Ez5Y97tDEEd2eICo6dTvy5JOxiYsWK +iubpyqmW/K0xjUqutANMxxGXQOJn13RnI9OHSxNXng5OoM66O5Eq08vXdcQbXF3i +qPf95g94OJVoyFwlmYiIMuV6cOCVNShbGKvFPpNB4p+7vr2FJ7ZeHEmiZKMXO8ex +3Uq5j1riVT/4tCemFz7dSpaCkHJ0xj5Ayknj14+t/lwF7IcOdP9/15uDp8HkgNh9 +xXkFqF3wtLkidVvmmbvDmOQiLJL6Hj6MxwEwI7Cf4ZN/UH0eIra7tEgMP+mkdNtR +K3tB4lQNLquWBr25PbzMiK5LfXPGL+odDAb/7mE1ClQtg28gohf+Ms9hQ4y2rZSG +sYUgh+U++Yn+HlYKI26tl7qB6lFcQb6Prc1SELAEFob8yMFkZ3UnXXLRUkLeM50K +WV9s5wu4Rn6Rzp722wdTQoBeaSxZ5bjmT3QerGV2GaRmuYjLxdYGZ5Co07DI+qEJ +Tj4mbpHx/OiqD7tR9i3rHpPyTjpD6MhJ3U4EjZfd6UMWGMz3Zcw0tvJe1cLpNRX8 +yjTdSxF1/H7ni7/IHkK4trIBuc58YPbZXAfYTcPugPwQE6N7RcfQHA4McFX91SZa +DskXUg== +-----END CERTIFICATE----- +Subject: C=GB,ST=Greater Manchester,L=Salford,O=Sectigo Limited,CN=Sectigo RSA Organization Validation Secure Server CA +-----BEGIN CERTIFICATE----- +MIIGGTCCBAGgAwIBAgIQE31TnKp8MamkM3AZaIR6jTANBgkqhkiG9w0BAQwFADCB +iDELMAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0pl +cnNleSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNV +BAMTJVVTRVJUcnVzdCBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTgx +MTAyMDAwMDAwWhcNMzAxMjMxMjM1OTU5WjCBlTELMAkGA1UEBhMCR0IxGzAZBgNV +BAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEYMBYGA1UE +ChMPU2VjdGlnbyBMaW1pdGVkMT0wOwYDVQQDEzRTZWN0aWdvIFJTQSBPcmdhbml6 +YXRpb24gVmFsaWRhdGlvbiBTZWN1cmUgU2VydmVyIENBMIIBIjANBgkqhkiG9w0B +AQEFAAOCAQ8AMIIBCgKCAQEAnJMCRkVKUkiS/FeN+S3qU76zLNXYqKXsW2kDwB0Q +9lkz3v4HSKjojHpnSvH1jcM3ZtAykffEnQRgxLVK4oOLp64m1F06XvjRFnG7ir1x +on3IzqJgJLBSoDpFUd54k2xiYPHkVpy3O/c8Vdjf1XoxfDV/ElFw4Sy+BKzL+k/h +fGVqwECn2XylY4QZ4ffK76q06Fha2ZnjJt+OErK43DOyNtoUHZZYQkBuCyKFHFEi 
+rsTIBkVtkuZntxkj5Ng2a4XQf8dS48+wdQHgibSov4o2TqPgbOuEQc6lL0giE5dQ +YkUeCaXMn2xXcEAG2yDoG9bzk4unMp63RBUJ16/9fAEc2wIDAQABo4IBbjCCAWow +HwYDVR0jBBgwFoAUU3m/WqorSs9UgOHYm8Cd8rIDZsswHQYDVR0OBBYEFBfZ1iUn +Z/kxwklD2TA2RIxsqU/rMA4GA1UdDwEB/wQEAwIBhjASBgNVHRMBAf8ECDAGAQH/ +AgEAMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAbBgNVHSAEFDASMAYG +BFUdIAAwCAYGZ4EMAQICMFAGA1UdHwRJMEcwRaBDoEGGP2h0dHA6Ly9jcmwudXNl +cnRydXN0LmNvbS9VU0VSVHJ1c3RSU0FDZXJ0aWZpY2F0aW9uQXV0aG9yaXR5LmNy +bDB2BggrBgEFBQcBAQRqMGgwPwYIKwYBBQUHMAKGM2h0dHA6Ly9jcnQudXNlcnRy +dXN0LmNvbS9VU0VSVHJ1c3RSU0FBZGRUcnVzdENBLmNydDAlBggrBgEFBQcwAYYZ +aHR0cDovL29jc3AudXNlcnRydXN0LmNvbTANBgkqhkiG9w0BAQwFAAOCAgEAThNA +lsnD5m5bwOO69Bfhrgkfyb/LDCUW8nNTs3Yat6tIBtbNAHwgRUNFbBZaGxNh10m6 +pAKkrOjOzi3JKnSj3N6uq9BoNviRrzwB93fVC8+Xq+uH5xWo+jBaYXEgscBDxLmP +bYox6xU2JPti1Qucj+lmveZhUZeTth2HvbC1bP6mESkGYTQxMD0gJ3NR0N6Fg9N3 +OSBGltqnxloWJ4Wyz04PToxcvr44APhL+XJ71PJ616IphdAEutNCLFGIUi7RPSRn +R+xVzBv0yjTqJsHe3cQhifa6ezIejpZehEU4z4CqN2mLYBd0FUiRnG3wTqN3yhsc +SPr5z0noX0+FCuKPkBurcEya67emP7SsXaRfz+bYipaQ908mgWB2XQ8kd5GzKjGf +FlqyXYwcKapInI5v03hAcNt37N3j0VcFcC3mSZiIBYRiBXBWdoY5TtMibx3+bfEO +s2LEPMvAhblhHrrhFYBZlAyuBbuMf1a+HNJav5fyakywxnB2sJCNwQs2uRHY1ihc +6k/+JLcYCpsM0MF8XPtpvcyiTcaQvKZN8rG61ppnW5YCUtCC+cQKXA0o4D/I+pWV +idWkvklsQLI+qGu41SWyxP7x09fn1txDAXYw+zuLXfdKiXyaNb78yvBXAfCNP6CH +MntHWpdLgtJmwsQt6j8k9Kf5qLnjatkYYaA7jBU= +-----END CERTIFICATE----- diff --git a/download/1.7.0/client_cert_issuing_cas.pem b/download/1.7.0/client_cert_issuing_cas.pem new file mode 100644 index 000000000..c3a00c7ae --- /dev/null +++ b/download/1.7.0/client_cert_issuing_cas.pem @@ -0,0 +1,653 @@ +Subject: C=DE,O=D-Trust GmbH,CN=D-TRUST Limited Basic CA 1-2 2019 +-----BEGIN CERTIFICATE----- +MIIFuTCCBKGgAwIBAgIDD+VKMA0GCSqGSIb3DQEBCwUAMFMxCzAJBgNVBAYTAkRF +MRUwEwYDVQQKDAxELVRydXN0IEdtYkgxLTArBgNVBAMMJEQtVFJVU1QgTGltaXRl +ZCBCYXNpYyBSb290IENBIDEgMjAxOTAeFw0xOTA4MjAxMjMyMjJaFw0zNDA2MTkw +ODE1NTFaMFAxCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxELVRydXN0IEdtYkgxKjAo 
+BgNVBAMMIUQtVFJVU1QgTGltaXRlZCBCYXNpYyBDQSAxLTIgMjAxOTCCASIwDQYJ +KoZIhvcNAQEBBQADggEPADCCAQoCggEBANxlUGXW81Y2JG/BtEO5dlbELYat4Zx9 +5b4RUux5scPTZX3wrEW+PK4EwQCvV8FH0SoDatOJcFiGduX2r29c0aFFyVKu6xHF +DApYNYV99+z5TiqXFdVkOUti56r10KsaO3FkcgAt4wDFgYd0dDseYo2SQqpKeqFR +QMVQVdLCt66yU8qbiaZ/sL2pcNsJMD/DkEV/axpTwzzk6H+kGUIJ+jpKpYw2pMFF +wYlqW91ICfLtTHvJqFb3DZ7yFNSiXgYBYH9R142vjflh1vg+GuqORiTLi/AhIjlb +3XUAFIZzJ77+PLQprYlRHGGBMaJ+3VbI+hWPTHpwVt6wHNVcfHUnA3kCAwEAAaOC +ApcwggKTMB8GA1UdIwQYMBaAFONo4hcITMmOHGJEDKkpkQJiC6OTMIIBMgYIKwYB +BQUHAQEEggEkMIIBIDBABggrBgEFBQcwAYY0aHR0cDovL2xpbWl0ZWQtYmFzaWMt +cm9vdC1jYS0xLTIwMTkub2NzcC5kLXRydXN0Lm5ldDBTBggrBgEFBQcwAoZHaHR0 +cDovL3d3dy5kLXRydXN0Lm5ldC9jZ2ktYmluL0QtVFJVU1RfTGltaXRlZF9CYXNp +Y19Sb290X0NBXzFfMjAxOS5jcnQwgYYGCCsGAQUFBzAChnpsZGFwOi8vZGlyZWN0 +b3J5LmQtdHJ1c3QubmV0L0NOPUQtVFJVU1QlMjBMaW1pdGVkJTIwQmFzaWMlMjBS +b290JTIwQ0ElMjAxJTIwMjAxOSxPPUQtVHJ1c3QlMjBHbWJILEM9REU/Y0FDZXJ0 +aWZpY2F0ZT9iYXNlPzAYBgNVHSAEETAPMA0GCysGAQQBpTQCg3QBMIHcBgNVHR8E +gdQwgdEwgc6ggcuggciGgYBsZGFwOi8vZGlyZWN0b3J5LmQtdHJ1c3QubmV0L0NO +PUQtVFJVU1QlMjBMaW1pdGVkJTIwQmFzaWMlMjBSb290JTIwQ0ElMjAxJTIwMjAx +OSxPPUQtVHJ1c3QlMjBHbWJILEM9REU/Y2VydGlmaWNhdGVyZXZvY2F0aW9ubGlz +dIZDaHR0cDovL2NybC5kLXRydXN0Lm5ldC9jcmwvZC10cnVzdF9saW1pdGVkX2Jh +c2ljX3Jvb3RfY2FfMV8yMDE5LmNybDAdBgNVHQ4EFgQU0A0+3Aiv40EIZuDc8vqZ +ai3fGLkwDgYDVR0PAQH/BAQDAgEGMBIGA1UdEwEB/wQIMAYBAf8CAQAwDQYJKoZI +hvcNAQELBQADggEBAH8NXqPrIcKiZC51vfxvajB1HhFnRFFN/G3ZU4yR7XI+uGec +DjR8tOHdFYFmZG4qbDl70ZuRG4bs6H8cvfWyo1NmWZqjAkr6o1kIRTnFwn4JsssJ +7HR2RmJ4ar0C9miIk9sTNLwKy1/kBvCFqssdKdQwBSi85KRxPFYvv+vnMCvSL0Ob ++65q6V7QzvCk7ojiSrcfvHS8QnHJE9ReFRKD4KXAd7+OcZc1K3Mf+uNNHt3CP3ie +DN9K90sI81IWucEeN2NYvw/tJNDH5L4Ah3cn8XzxQVzOfAnn1isf2pci1IEj5f3Y +9JA7LYLLeH7n4+E5JWRiIUAhqNhQTchmwKLdR+E= +-----END CERTIFICATE----- +Subject: C=DE,O=D-Trust GmbH,CN=D-TRUST Limited Basic CA 1-3 2019 +-----BEGIN CERTIFICATE----- +MIIFvzCCBKegAwIBAgIDD+UsMA0GCSqGSIb3DQEBCwUAMFMxCzAJBgNVBAYTAkRF 
+MRUwEwYDVQQKDAxELVRydXN0IEdtYkgxLTArBgNVBAMMJEQtVFJVU1QgTGltaXRl +ZCBCYXNpYyBSb290IENBIDEgMjAxOTAeFw0xOTA2MjAxMTQ2NDlaFw0zNDA2MTkw +ODE1NTFaMFAxCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxELVRydXN0IEdtYkgxKjAo +BgNVBAMMIUQtVFJVU1QgTGltaXRlZCBCYXNpYyBDQSAxLTMgMjAxOTCCASIwDQYJ +KoZIhvcNAQEBBQADggEPADCCAQoCggEBAL7RBVVVynr23OCuKpkrzJK87yspTQVh +vJRTtHS7zXG0jzpNLUV8D6Qjd3nOVs2VnW+rpfgQmFeic3E8G5jJmGv18O6LOGJS +CXbiF2eonBQ0oTF97oVukj/8SRYWEBjeBjB//U3gwdt14qATw1zk3B8K/u0zxtQ0 +1xMTL5ckilQ7/+x+RVCGanvx/FscQ1He0fkhfLgqaDJimWu7b0phwZqyyI1GEjEN +9FLWJxh4OeIzupAT4b1j5oaXHCY4BGT5zk6PbOYC7U2Jnt4zKU+pJVIJ+EhYOJS5 +tQM7YFG/eECAHcBtgdUIDgBZqrrx+k14aSQcW701sGqQT7cKcLen+yMCAwEAAaOC +Ap0wggKZMB8GA1UdIwQYMBaAFONo4hcITMmOHGJEDKkpkQJiC6OTMIIBMgYIKwYB +BQUHAQEEggEkMIIBIDBABggrBgEFBQcwAYY0aHR0cDovL2xpbWl0ZWQtYmFzaWMt +cm9vdC1jYS0xLTIwMTkub2NzcC5kLXRydXN0Lm5ldDBTBggrBgEFBQcwAoZHaHR0 +cDovL3d3dy5kLXRydXN0Lm5ldC9jZ2ktYmluL0QtVFJVU1RfTGltaXRlZF9CYXNp +Y19Sb290X0NBXzFfMjAxOS5jcnQwgYYGCCsGAQUFBzAChnpsZGFwOi8vZGlyZWN0 +b3J5LmQtdHJ1c3QubmV0L0NOPUQtVFJVU1QlMjBMaW1pdGVkJTIwQmFzaWMlMjBS +b290JTIwQ0ElMjAxJTIwMjAxOSxPPUQtVHJ1c3QlMjBHbWJILEM9REU/Y0FDZXJ0 +aWZpY2F0ZT9iYXNlPzAYBgNVHSAEETAPMA0GCysGAQQBpTQCg3QBMIHiBgNVHR8E +gdowgdcwgYmggYaggYOGgYBsZGFwOi8vZGlyZWN0b3J5LmQtdHJ1c3QubmV0L0NO +PUQtVFJVU1QlMjBMaW1pdGVkJTIwQmFzaWMlMjBSb290JTIwQ0ElMjAxJTIwMjAx +OSxPPUQtVHJ1c3QlMjBHbWJILEM9REU/Y2VydGlmaWNhdGVyZXZvY2F0aW9ubGlz +dDBJoEegRYZDaHR0cDovL2NybC5kLXRydXN0Lm5ldC9jcmwvZC10cnVzdF9saW1p +dGVkX2Jhc2ljX3Jvb3RfY2FfMV8yMDE5LmNybDAdBgNVHQ4EFgQUZeInnG91+3Vn +CLczeuOdjnssKIgwDgYDVR0PAQH/BAQDAgEGMBIGA1UdEwEB/wQIMAYBAf8CAQAw +DQYJKoZIhvcNAQELBQADggEBAJLvDm1ZuBMhO2qR2R4H/GDDYETRBNqvtvWHRvBw +vryhtDm7tovqDg2v7x+vcSqZApFVW+zs+OvzNRXtyyIlkqxP5CL0okpHqqmKaaHn +tH8D93pV/p7xE39gFE6NNceSx6DHBxuOcOEha8zA8ixH+j+fzLX6SEOAhBIfCgDb +qg9Xtxi7+uupq4koQcXrDNTRHxuoNxAHnwYtgapwKwyBwMMaLliqxVyDcwN6aJEQ +tVoyOibUnUXej4bXh8FPCSU7m98nLY9aKk1O30jsgSiLxuVKBav8JaDPY1i69CSd +vn2adNlHjbl/57GVQ4VoTDzlmhRQGR9TIRPYA5F6VdeI4ak= +-----END CERTIFICATE----- 
+Subject: C=DE,O=D-Trust GmbH,CN=D-TRUST SSL Class 3 CA 1 2009 +-----BEGIN CERTIFICATE----- +MIIFMjCCBBqgAwIBAgIDCZBjMA0GCSqGSIb3DQEBCwUAME0xCzAJBgNVBAYTAkRF +MRUwEwYDVQQKDAxELVRydXN0IEdtYkgxJzAlBgNVBAMMHkQtVFJVU1QgUm9vdCBD +bGFzcyAzIENBIDIgMjAwOTAeFw0wOTExMTIxMjQ2NTVaFw0yOTExMDUwODM1NTha +MEwxCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxELVRydXN0IEdtYkgxJjAkBgNVBAMM +HUQtVFJVU1QgU1NMIENsYXNzIDMgQ0EgMSAyMDA5MIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEAoal0SyLSijE0JkuhHJmOCbmQznyxuSY7DaEwhUsdUpI+ +2llkDLz6s9BWQe1zCVXDhrt3qz5U5H4h6jxm5Ec+ZbFiU3Gv2yxpI5cRPrqj9mJU +1CGgy1+29khuUnoopzSq66HPuGZGh06I7bJkXTQ7AQ92z1MdL2wATj1UWdNid3sQ +NiWIm+69nURHY6tmCNenNcG6aV4qjHMUPsjpCRabNY9nUO12rsmiDW2mbAC3gcxQ +lqLgLYur9HvB8cW0xu2JZ/B3PXmNphVuWskp3Y1u0SvIYzuEsE7lWDbBmtWZtabB +hzThkDQvd+3keQ1sU/beq1NeXfgKzQ5G+4Ql2PUY/wIDAQABo4ICGjCCAhYwHwYD +VR0jBBgwFoAU/doUxJ8w3iG9HkI5/KtjI0ng8YQwRAYIKwYBBQUHAQEEODA2MDQG +CCsGAQUFBzABhihodHRwOi8vcm9vdC1jMy1jYTItMjAwOS5vY3NwLmQtdHJ1c3Qu +bmV0MF8GA1UdIARYMFYwVAYEVR0gADBMMEoGCCsGAQUFBwIBFj5odHRwOi8vd3d3 +LmQtdHJ1c3QubmV0L2ludGVybmV0L2ZpbGVzL0QtVFJVU1RfUm9vdF9QS0lfQ1BT +LnBkZjAzBgNVHREELDAqgRBpbmZvQGQtdHJ1c3QubmV0hhZodHRwOi8vd3d3LmQt +dHJ1c3QubmV0MIHTBgNVHR8EgcswgcgwgYCgfqB8hnpsZGFwOi8vZGlyZWN0b3J5 +LmQtdHJ1c3QubmV0L0NOPUQtVFJVU1QlMjBSb290JTIwQ2xhc3MlMjAzJTIwQ0El +MjAyJTIwMjAwOSxPPUQtVHJ1c3QlMjBHbWJILEM9REU/Y2VydGlmaWNhdGVyZXZv +Y2F0aW9ubGlzdDBDoEGgP4Y9aHR0cDovL3d3dy5kLXRydXN0Lm5ldC9jcmwvZC10 +cnVzdF9yb290X2NsYXNzXzNfY2FfMl8yMDA5LmNybDAdBgNVHQ4EFgQUUBkylJrE +tQRNVtDAgyHVNVWwsXowDgYDVR0PAQH/BAQDAgEGMBIGA1UdEwEB/wQIMAYBAf8C +AQAwDQYJKoZIhvcNAQELBQADggEBABM5QRHX/yInsmZLWVlvmWmKb3c4IB3hAIVR +sAGhkvQJ/RD1GZjZUBBYMWkD1P37fTQxlqTOe3NecVvElkYZuCq7HSM6o7awzb3m +yLn1kN+hDCsxX0EYbVSNjEjkW3QEkqJH9owH4qeMDxf7tfXB7BVKO+rarYPa2PR8 +Wz2KhjFDmAeFg2J89YcpeJJEEJXoweAkgJEEwwEIfJ2yLjYo78RD0Rvij/+zkfj9 ++dSvTiZTuqicyo37qNoYHgchuqXnKodhWkW89oo2NKhfeNHHbqvXEJmx0PbI6YyQ +50GnYECZRHNKhgbPEtNy/QetU53aWlTlvu4NIwLW5XVsrxlQ2Zw= +-----END CERTIFICATE----- +Subject: C=DE,O=Verein zur Foerderung eines 
Deutschen Forschungsnetzes e. V.,OU=DFN-PKI,CN=DFN-Verein Global Issuing CA +-----BEGIN CERTIFICATE----- +MIIFrDCCBJSgAwIBAgIHG2O60B4sPTANBgkqhkiG9w0BAQsFADCBlTELMAkGA1UE +BhMCREUxRTBDBgNVBAoTPFZlcmVpbiB6dXIgRm9lcmRlcnVuZyBlaW5lcyBEZXV0 +c2NoZW4gRm9yc2NodW5nc25ldHplcyBlLiBWLjEQMA4GA1UECxMHREZOLVBLSTEt +MCsGA1UEAxMkREZOLVZlcmVpbiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAyMB4X +DTE2MDUyNDExMzg0MFoXDTMxMDIyMjIzNTk1OVowgY0xCzAJBgNVBAYTAkRFMUUw +QwYDVQQKDDxWZXJlaW4genVyIEZvZXJkZXJ1bmcgZWluZXMgRGV1dHNjaGVuIEZv +cnNjaHVuZ3NuZXR6ZXMgZS4gVi4xEDAOBgNVBAsMB0RGTi1QS0kxJTAjBgNVBAMM +HERGTi1WZXJlaW4gR2xvYmFsIElzc3VpbmcgQ0EwggEiMA0GCSqGSIb3DQEBAQUA +A4IBDwAwggEKAoIBAQCdO3kcR94fhsvGadcQnjnX2aIw23IcBX8pX0to8a0Z1kzh +axuxC3+hq+B7i4vYLc5uiDoQ7lflHn8EUTbrunBtY6C+li5A4dGDTGY9HGRp5Zuk +rXKuaDlRh3nMF9OuL11jcUs5eutCp5eQaQW/kP+kQHC9A+e/nhiIH5+ZiE0OR41I +X2WZENLZKkntwbktHZ8SyxXTP38eVC86rpNXp354ytVK4hrl7UF9U1/Isyr1ijCs +7RcFJD+2oAsH/U0amgNSoDac3iSHZeTn+seWcyQUzdDoG2ieGFmudn730Qp4PIdL +sDfPU8o6OBDzy0dtjGQ9PFpFSrrKgHy48+enTEzNAgMBAAGjggIFMIICATASBgNV +HRMBAf8ECDAGAQH/AgEBMA4GA1UdDwEB/wQEAwIBBjApBgNVHSAEIjAgMA0GCysG +AQQBga0hgiweMA8GDSsGAQQBga0hgiwBAQQwHQYDVR0OBBYEFGs6mIv58lOJ2uCt +sjIeCR/oqjt0MB8GA1UdIwQYMBaAFJPj2DIm2tXxSqWRSuDqS+KiDM/hMIGPBgNV +HR8EgYcwgYQwQKA+oDyGOmh0dHA6Ly9jZHAxLnBjYS5kZm4uZGUvZ2xvYmFsLXJv +b3QtZzItY2EvcHViL2NybC9jYWNybC5jcmwwQKA+oDyGOmh0dHA6Ly9jZHAyLnBj +YS5kZm4uZGUvZ2xvYmFsLXJvb3QtZzItY2EvcHViL2NybC9jYWNybC5jcmwwgd0G +CCsGAQUFBwEBBIHQMIHNMDMGCCsGAQUFBzABhidodHRwOi8vb2NzcC5wY2EuZGZu +LmRlL09DU1AtU2VydmVyL09DU1AwSgYIKwYBBQUHMAKGPmh0dHA6Ly9jZHAxLnBj +YS5kZm4uZGUvZ2xvYmFsLXJvb3QtZzItY2EvcHViL2NhY2VydC9jYWNlcnQuY3J0 +MEoGCCsGAQUFBzAChj5odHRwOi8vY2RwMi5wY2EuZGZuLmRlL2dsb2JhbC1yb290 +LWcyLWNhL3B1Yi9jYWNlcnQvY2FjZXJ0LmNydDANBgkqhkiG9w0BAQsFAAOCAQEA +gXhFpE6kfw5V8Amxaj54zGg1qRzzlZ4/8/jfazh3iSyNta0+x/KUzaAGrrrMqLGt +Mwi2JIZiNkx4blDw1W5gjU9SMUOXRnXwYuRuZlHBQjFnUOVJ5zkey5/KhkjeCBT/ +FUsrZpugOJ8Azv2n69F/Vy3ITF/cEBGXPpYEAlyEqCk5bJT8EJIGe57u2Ea0G7UD 
+DDjZ3LCpP3EGC7IDBzPCjUhjJSU8entXbveKBTjvuKCuL/TbB9VbhBjBqbhLzmyQ +GoLkuT36d/HSHzMCv1PndvncJiVBby+mG/qkE5D6fH7ZC2Bd7L/KQaBh+xFJKdio +LXUV2EoY6hbvVTQiGhONBg== +-----END CERTIFICATE----- +Subject: C=DE,ST=Bayern,L=Muenchen,O=Fraunhofer,OU=Fraunhofer Corporate PKI,CN=Fraunhofer User CA - G02 +-----BEGIN CERTIFICATE----- +MIIFqzCCBJOgAwIBAgIHG2O6xotSQjANBgkqhkiG9w0BAQsFADCBlTELMAkGA1UE +BhMCREUxRTBDBgNVBAoTPFZlcmVpbiB6dXIgRm9lcmRlcnVuZyBlaW5lcyBEZXV0 +c2NoZW4gRm9yc2NodW5nc25ldHplcyBlLiBWLjEQMA4GA1UECxMHREZOLVBLSTEt +MCsGA1UEAxMkREZOLVZlcmVpbiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAyMB4X +DTE2MDUyNDExMzgzMFoXDTMxMDIyMjIzNTk1OVowgYwxCzAJBgNVBAYTAkRFMQ8w +DQYDVQQIDAZCYXllcm4xETAPBgNVBAcMCE11ZW5jaGVuMRMwEQYDVQQKDApGcmF1 +bmhvZmVyMSEwHwYDVQQLDBhGcmF1bmhvZmVyIENvcnBvcmF0ZSBQS0kxITAfBgNV +BAMMGEZyYXVuaG9mZXIgVXNlciBDQSAtIEcwMjCCASIwDQYJKoZIhvcNAQEBBQAD +ggEPADCCAQoCggEBAL1FxEM9lv2hli0ZvDCDNvVn2mIcIJ4cvKUQ30l5ZARkKEtY +KlqSsrof2WmWFdxx1DPwFtp38QgbsbOPKSDoYiWw+ycW403f1ZJAvn6c5N07oT1A +j3xzYG/QxgY+qkLhKJnV0pr7afC+VIFB/wmNFhvlyXpAVy07NOikiRFlBzw/s4/H +HdHTL2VKEjCnfD6rzKtTN+D+24UYXiMvNzmPyWQMQhIxuDMPTwfoqBFL7leo334z +gHm78leaLK52DTzpHvlNHP4DnTRa2ipVpyj3yQWcOBw+75oVE71AbyFPzw1a0J9P +wisZljQ0eTb6/tkF81W5f6UD7NN4Ie2gxEvZkysCAwEAAaOCAgUwggIBMBIGA1Ud +EwEB/wQIMAYBAf8CAQEwDgYDVR0PAQH/BAQDAgEGMCkGA1UdIAQiMCAwDQYLKwYB +BAGBrSGCLB4wDwYNKwYBBAGBrSGCLAEBBDAdBgNVHQ4EFgQUoyb1mk/b2jvhtaru +WIvQ/P+1bVEwHwYDVR0jBBgwFoAUk+PYMiba1fFKpZFK4OpL4qIMz+EwgY8GA1Ud +HwSBhzCBhDBAoD6gPIY6aHR0cDovL2NkcDEucGNhLmRmbi5kZS9nbG9iYWwtcm9v +dC1nMi1jYS9wdWIvY3JsL2NhY3JsLmNybDBAoD6gPIY6aHR0cDovL2NkcDIucGNh +LmRmbi5kZS9nbG9iYWwtcm9vdC1nMi1jYS9wdWIvY3JsL2NhY3JsLmNybDCB3QYI +KwYBBQUHAQEEgdAwgc0wMwYIKwYBBQUHMAGGJ2h0dHA6Ly9vY3NwLnBjYS5kZm4u +ZGUvT0NTUC1TZXJ2ZXIvT0NTUDBKBggrBgEFBQcwAoY+aHR0cDovL2NkcDEucGNh +LmRmbi5kZS9nbG9iYWwtcm9vdC1nMi1jYS9wdWIvY2FjZXJ0L2NhY2VydC5jcnQw +SgYIKwYBBQUHMAKGPmh0dHA6Ly9jZHAyLnBjYS5kZm4uZGUvZ2xvYmFsLXJvb3Qt +ZzItY2EvcHViL2NhY2VydC9jYWNlcnQuY3J0MA0GCSqGSIb3DQEBCwUAA4IBAQDE 
+klJ5thEqcbIyroBrhnWl00oAUXYuYQqXDpBTCzF08aBk41FyLZXH54w7NUecdTZp +aq0SgueogbSv9viT8EzQ5Bt6bapOl5pVjEb6yBrV0Y3d5Vb3h8WA+/E+DRz/uV5M +gKdVHdazimuXK1HHf0UHSFiEkgiY3xSQr6CRH7UzzivWj9s6B2GUuP4zzfKrgAxQ +N2IaEUtvWiwl5Mtv55p6wmnBOhCnCtj1Ge5Du7jR8o2xAh//fZ4tz5UQ7XXVoeu5 +iueZp3qNb0cTtiGGPJxVtFmoaFNltp+zI/y52hF9XgjC+mJBVYYahmacW3wMpIAO +R0lstmlDfiKvecyE6S4v +-----END CERTIFICATE----- +Subject: C=NL,O=GEANT Vereniging,CN=GEANT OV ECC CA 4 +-----BEGIN CERTIFICATE----- +MIIDeTCCAv+gAwIBAgIRAOuOgRlxKfSvZO+BSi9QzukwCgYIKoZIzj0EAwMwgYgx +CzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpOZXcgSmVyc2V5MRQwEgYDVQQHEwtKZXJz +ZXkgQ2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMS4wLAYDVQQD +EyVVU0VSVHJ1c3QgRUNDIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTIwMDIx +ODAwMDAwMFoXDTMzMDUwMTIzNTk1OVowRDELMAkGA1UEBhMCTkwxGTAXBgNVBAoT +EEdFQU5UIFZlcmVuaWdpbmcxGjAYBgNVBAMTEUdFQU5UIE9WIEVDQyBDQSA0MFkw +EwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEXYkvGrfrMs2IwdI5+IwpEwPh+igW/BOW +etmOwP/ZIXC8fNeC3/ZYPAAMyRpFS0v3/c55FDTE2xbOUZ5zeVZYQqOCAYswggGH +MB8GA1UdIwQYMBaAFDrhCYbUzxnClnZ0SXbc4DXGY2OaMB0GA1UdDgQWBBTttKAz +ahsIkba9+kGSvZqrq2P0UzAOBgNVHQ8BAf8EBAMCAYYwEgYDVR0TAQH/BAgwBgEB +/wIBADAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwOAYDVR0gBDEwLzAt +BgRVHSAAMCUwIwYIKwYBBQUHAgEWF2h0dHBzOi8vc2VjdGlnby5jb20vQ1BTMFAG +A1UdHwRJMEcwRaBDoEGGP2h0dHA6Ly9jcmwudXNlcnRydXN0LmNvbS9VU0VSVHJ1 +c3RFQ0NDZXJ0aWZpY2F0aW9uQXV0aG9yaXR5LmNybDB2BggrBgEFBQcBAQRqMGgw +PwYIKwYBBQUHMAKGM2h0dHA6Ly9jcnQudXNlcnRydXN0LmNvbS9VU0VSVHJ1c3RF +Q0NBZGRUcnVzdENBLmNydDAlBggrBgEFBQcwAYYZaHR0cDovL29jc3AudXNlcnRy +dXN0LmNvbTAKBggqhkjOPQQDAwNoADBlAjAfs9nsM0qaJGVu6DpWVy4qojiOpwV1 +h/MWZ5GJxy6CKv3+RMB3STkaFh0+Hifbk24CMQDRf/ujXAQ1b4nFpZGaSIKldygc +dCDAxbAd9tlxcN/+J534CJDblzd/40REzGWwS5k= +-----END CERTIFICATE----- +Subject: C=NL,O=GEANT Vereniging,CN=GEANT OV RSA CA 4 +-----BEGIN CERTIFICATE----- +MIIG5TCCBM2gAwIBAgIRANpDvROb0li7TdYcrMTz2+AwDQYJKoZIhvcNAQEMBQAw +gYgxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpOZXcgSmVyc2V5MRQwEgYDVQQHEwtK 
+ZXJzZXkgQ2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMS4wLAYD +VQQDEyVVU0VSVHJ1c3QgUlNBIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTIw +MDIxODAwMDAwMFoXDTMzMDUwMTIzNTk1OVowRDELMAkGA1UEBhMCTkwxGTAXBgNV +BAoTEEdFQU5UIFZlcmVuaWdpbmcxGjAYBgNVBAMTEUdFQU5UIE9WIFJTQSBDQSA0 +MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEApYhi1aEiPsg9ZKRMAw9Q +r8Mthsr6R20VSfFeh7TgwtLQi6RSRLOh4or4EMG/1th8lijv7xnBMVZkTysFiPmT +PiLOfvz+QwO1NwjvgY+Jrs7fSoVA/TQkXzcxu4Tl3WHi+qJmKLJVu/JOuHud6mOp +LWkIbhODSzOxANJ24IGPx9h4OXDyy6/342eE6UPXCtJ8AzeumTG6Dfv5KVx24lCF +TGUzHUB+j+g0lSKg/Sf1OzgCajJV9enmZ/84ydh48wPp6vbWf1H0O3Rd3LhpMSVn +TqFTLKZSbQeLcx/l9DOKZfBCC9ghWxsgTqW9gQ7v3T3aIfSaVC9rnwVxO0VjmDdP +FNbdoxnh0zYwf45nV1QQgpRwZJ93yWedhp4ch1a6Ajwqs+wv4mZzmBSjovtV0mKw +d+CQbSToalEUP4QeJq4Udz5WNmNMI4OYP6cgrnlJ50aa0DZPlJqrKQPGL69KQQz1 +2WgxvhCuVU70y6ZWAPopBa1ykbsttpLxADZre5cH573lIuLHdjx7NjpYIXRx2+QJ +URnX2qx37eZIxYXz8ggM+wXH6RDbU3V2o5DP67hXPHSAbA+p0orjAocpk2osxHKo +NSE3LCjNx8WVdxnXvuQ28tKdaK69knfm3bB7xpdfsNNTPH9ElcjscWZxpeZ5Iij8 +lyrCG1z0vSWtSBsgSnUyG/sCAwEAAaOCAYswggGHMB8GA1UdIwQYMBaAFFN5v1qq +K0rPVIDh2JvAnfKyA2bLMB0GA1UdDgQWBBRvHTVJEGwy+lmgnryK6B+VvnF6DDAO +BgNVHQ8BAf8EBAMCAYYwEgYDVR0TAQH/BAgwBgEB/wIBADAdBgNVHSUEFjAUBggr +BgEFBQcDAQYIKwYBBQUHAwIwOAYDVR0gBDEwLzAtBgRVHSAAMCUwIwYIKwYBBQUH +AgEWF2h0dHBzOi8vc2VjdGlnby5jb20vQ1BTMFAGA1UdHwRJMEcwRaBDoEGGP2h0 +dHA6Ly9jcmwudXNlcnRydXN0LmNvbS9VU0VSVHJ1c3RSU0FDZXJ0aWZpY2F0aW9u +QXV0aG9yaXR5LmNybDB2BggrBgEFBQcBAQRqMGgwPwYIKwYBBQUHMAKGM2h0dHA6 +Ly9jcnQudXNlcnRydXN0LmNvbS9VU0VSVHJ1c3RSU0FBZGRUcnVzdENBLmNydDAl +BggrBgEFBQcwAYYZaHR0cDovL29jc3AudXNlcnRydXN0LmNvbTANBgkqhkiG9w0B +AQwFAAOCAgEAUtlC3e0xj/1BMfPhdQhUXeLjb0xp8UE28kzWE5xDzGKbfGgnrT2R +lw5gLIx+/cNVrad//+MrpTppMlxq59AsXYZW3xRasrvkjGfNR3vt/1RAl8iI31lG +hIg6dfIX5N4esLkrQeN8HiyHKH6khm4966IkVVtnxz5CgUPqEYn4eQ+4eeESrWBh +AqXaiv7HRvpsdwLYekAhnrlGpioZ/CJIT2PTTxf+GHM6cuUnNqdUzfvrQgA8kt1/ +ASXx2od/M+c8nlJqrGz29lrJveJOSEMX0c/ts02WhsfMhkYa6XujUZLmvR1Eq08r +48/EZ4l+t5L4wt0DV8VaPbsEBF1EOFpz/YS2H6mSwcFaNJbnYqqJHIvm3PLJHkFm 
+EoLXRVrQXdCT+3wgBfgU6heCV5CYBz/YkrdWES7tiiT8sVUDqXmVlTsbiRNiyLs2 +bmEWWFUl76jViIJog5fongEqN3jLIGTG/mXrJT1UyymIcobnIGrbwwRVz/mpFQo0 +vBYIi1k2ThVh0Dx88BbF9YiP84dd8Fkn5wbE6FxXYJ287qfRTgmhePecPc73Yrzt +apdRcsKVGkOpaTIJP/l+lAHRLZxk/dUtyN95G++bOSQqnOCpVPabUGl2E/OEyFrp +Ipwgu2L/WJclvd6g+ZA/iWkLSMcpnFb+uX6QBqvD6+RNxul1FaB5iHY= +-----END CERTIFICATE----- +Subject: C=NL,O=GEANT Vereniging,CN=GEANT Personal CA 4 +-----BEGIN CERTIFICATE----- +MIIG5jCCBM6gAwIBAgIQMQJw1DW+mySa+FbQ4eKFSTANBgkqhkiG9w0BAQwFADCB +iDELMAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0pl +cnNleSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNV +BAMTJVVTRVJUcnVzdCBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMjAw +MjE4MDAwMDAwWhcNMzMwNTAxMjM1OTU5WjBGMQswCQYDVQQGEwJOTDEZMBcGA1UE +ChMQR0VBTlQgVmVyZW5pZ2luZzEcMBoGA1UEAxMTR0VBTlQgUGVyc29uYWwgQ0Eg +NDCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBALNK4iJeJ1vpBFsUBDUy +IBSutNIxQMbNUMAeoUTKr55KYX8tkN5imzNqLaRCypYBPP9wED2AaO6e8njkbjzJ +wLgPqDBkW9sG3kmi3GW6cF4Hwr5ysZqve/5EJDhV+9OhfTu/4dMnoR4Q41HcjMk9 +MzLOADAQ0awBZ/29r0d49AUmIKELNeqEqmnTN6fndL7x/2K0TLToZLxqS7sy/Jvi +0wEFr0CfdjcAsioh7KaD+Jizyb1aRKQzJ6Q20VEHX7UqWc1SkzTkbz6xj0S5ydBB +FQh0fNiy+qM/deVpK4HgmPSJrrpQZ+LlbHfWabmwoDPxF71QZVYiqrrAoUrGRJ+4 +7iLBiIg8miIYS7Hd2ppvAUt24CugMXUjETjQ+oYh09fNi5n/AvoER8UBvTHLxt+b +lL0bvL+2z2YiUWk+2Qtn+dD+JU5Z2y71qV7+cr+4YXjvGzF5bYsi8HiwflTb4Php +3y+k1twKtchdcq2QGc0eDG6Y01nRHUiyr8/PtMAsLHEPNZ2wzsA7fb8mftHiV20Z +FmYqknJ8AIOfwdTVA+E62JayOJ+sxadqcmFDorsz/mrPwGZ8+txr4xSuvVjg0dlv +0yuA+1YpBDIYNfL4bkX+IcZ1mTstL4Xw0f4N2iW3bBmnPnYmoYxMM8gflCiTgss7 +3nBvG2f7v1PD7BDGYNO4iD4vAgMBAAGjggGLMIIBhzAfBgNVHSMEGDAWgBRTeb9a +qitKz1SA4dibwJ3ysgNmyzAdBgNVHQ4EFgQUaQChxyFY+ODFGyCwCt2nUb8T2eQw +DgYDVR0PAQH/BAQDAgGGMBIGA1UdEwEB/wQIMAYBAf8CAQAwHQYDVR0lBBYwFAYI +KwYBBQUHAwIGCCsGAQUFBwMEMDgGA1UdIAQxMC8wLQYEVR0gADAlMCMGCCsGAQUF +BwIBFhdodHRwczovL3NlY3RpZ28uY29tL0NQUzBQBgNVHR8ESTBHMEWgQ6BBhj9o +dHRwOi8vY3JsLnVzZXJ0cnVzdC5jb20vVVNFUlRydXN0UlNBQ2VydGlmaWNhdGlv 
+bkF1dGhvcml0eS5jcmwwdgYIKwYBBQUHAQEEajBoMD8GCCsGAQUFBzAChjNodHRw +Oi8vY3J0LnVzZXJ0cnVzdC5jb20vVVNFUlRydXN0UlNBQWRkVHJ1c3RDQS5jcnQw +JQYIKwYBBQUHMAGGGWh0dHA6Ly9vY3NwLnVzZXJ0cnVzdC5jb20wDQYJKoZIhvcN +AQEMBQADggIBAAoFTnsNjx8TOQD9b+xixsPt7Req4wHMeNw/R5dddEPgQAQAYJZK +z5BEv1cjGbH7nbPH3AxrxhN6OVH40p6OLIo9MXSrrfMzGs7/P+FTCjwgNxFEtLQ1 +KC9NboA3asJcl7mIs3l8h9iAgEH1zLUvq2s+5n++NQmbzudDsTFDMapY3kX1TwyU +CTRzmItqcbsYIyg2MeIXWfRtqPqC5R4bufmpzA5BPINLX340Sp/CNQ9QZqw3Vkfy +HWwTo+vO9Gm2L6srNamJT6Lb+TeXZvl8UPL5a72O/pH0GgGHjt6z9QzPARnaRKsh +VWviNK6ST4WmZHllu3CJg0BXqx1vWyswawgvNeWt1qxITacYe9mSWTbNR2CftvTU +werruDSY2jMaZPoNqbjUpuG/blYwWzzvVerBUhviAahPXJF/9V48ybWPBq6qKOEo +kW+s3B4ad5sY96KlovEijaIQDip1HO0SD+rLNYaiBcr9MV2aK+DfbZ8w9BaNCQyF +EYwzxIKOVk3bYvzHRk5ihUDascmbk/bkiNl74c/KfuKQmJImaqWoWZR6jBcXcPV0 +WUIKz/nILTpFhGojZEQW77by3aezAi9jrEIUBHRG1LwzPbJc2V3SOzYyaJFQatzu +KZbN1Q9s9y/2x1QXtKwREY8jNgvx0iIfOK35gKgYJJcyDql4XfuEc2nV +-----END CERTIFICATE----- +Subject: C=NL,O=GEANT Vereniging,CN=GEANT Personal ECC CA 4 +-----BEGIN CERTIFICATE----- +MIIDfjCCAwSgAwIBAgIQdpAhff5d1sLEUCfF3NFaJjAKBggqhkjOPQQDAzCBiDEL +MAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNl +eSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMT +JVVTRVJUcnVzdCBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMjAwMjE4 +MDAwMDAwWhcNMzMwNTAxMjM1OTU5WjBKMQswCQYDVQQGEwJOTDEZMBcGA1UEChMQ +R0VBTlQgVmVyZW5pZ2luZzEgMB4GA1UEAxMXR0VBTlQgUGVyc29uYWwgRUNDIENB +IDQwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQYZ2cR4O5tfdskg2NNo346Noyo +enQjWhWQElTQQ+brmMxr5leW1AfpAJ9bsaUadsCJJnA5ycFITYjXfl/XvfPko4IB +izCCAYcwHwYDVR0jBBgwFoAUOuEJhtTPGcKWdnRJdtzgNcZjY5owHQYDVR0OBBYE +FKgtbYEyZI3msk+s/hHyZZmFE6luMA4GA1UdDwEB/wQEAwIBhjASBgNVHRMBAf8E +CDAGAQH/AgEAMB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEFBQcDBDA4BgNVHSAE +MTAvMC0GBFUdIAAwJTAjBggrBgEFBQcCARYXaHR0cHM6Ly9zZWN0aWdvLmNvbS9D +UFMwUAYDVR0fBEkwRzBFoEOgQYY/aHR0cDovL2NybC51c2VydHJ1c3QuY29tL1VT +RVJUcnVzdEVDQ0NlcnRpZmljYXRpb25BdXRob3JpdHkuY3JsMHYGCCsGAQUFBwEB 
+BGowaDA/BggrBgEFBQcwAoYzaHR0cDovL2NydC51c2VydHJ1c3QuY29tL1VTRVJU +cnVzdEVDQ0FkZFRydXN0Q0EuY3J0MCUGCCsGAQUFBzABhhlodHRwOi8vb2NzcC51 +c2VydHJ1c3QuY29tMAoGCCqGSM49BAMDA2gAMGUCMQCCX6P32oo7RiAIk1DIekZM +nFGZwY+xJoZ5HyChGc1Ncuupnh7Ezukr1EnL+MyAhNcCMD6DlSMWE5I++OBvznnX +1npjvntLcKogArAPjLglGGeymFt4U6pdy7/C0/miHCPuDA== +-----END CERTIFICATE----- +Subject: C=GR,O=Hellenic Academic and Research Institutions CA,CN=GEANT S/MIME ECC 1 +-----BEGIN CERTIFICATE----- +MIIDdjCCAvugAwIBAgIQDJz22V5XPKzFUHJkSyvftjAKBggqhkjOPQQDAzBvMQsw +CQYDVQQGEwJHUjE3MDUGA1UECgwuSGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJlc2Vh +cmNoIEluc3RpdHV0aW9ucyBDQTEnMCUGA1UEAwweSEFSSUNBIENsaWVudCBFQ0Mg +Um9vdCBDQSAyMDIxMB4XDTI1MDEwMzExMTE0MFoXDTM5MTIzMTExMTEzOVowYzEL +MAkGA1UEBhMCR1IxNzA1BgNVBAoMLkhlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNl +YXJjaCBJbnN0aXR1dGlvbnMgQ0ExGzAZBgNVBAMMEkdFQU5UIFMvTUlNRSBFQ0Mg +MTB2MBAGByqGSM49AgEGBSuBBAAiA2IABHHzZBx8BXADofSXSxbumNUgOW7oPUFD +MTBf97xIFnUvxGf449zwH17IBxs6EnsIRSZ80+3fMPGm7PcW4bogQmpStkHsbhl/ +yc+R0YudJcnXnC+HuGwkKlk9hoXX+gtBJ6OCAWYwggFiMBIGA1UdEwEB/wQIMAYB +Af8CAQAwHwYDVR0jBBgwFoAUUgjSvjKBJf31GpfsTl8au1PNkK0wUAYIKwYBBQUH +AQEERDBCMEAGCCsGAQUFBzAChjRodHRwOi8vY3J0LmhhcmljYS5nci9IQVJJQ0Et +Q2xpZW50LVJvb3QtMjAyMS1FQ0MuY2VyMEQGA1UdIAQ9MDswOQYEVR0gADAxMC8G +CCsGAQUFBwIBFiNodHRwOi8vcmVwby5oYXJpY2EuZ3IvZG9jdW1lbnRzL0NQUzAd +BgNVHSUEFjAUBggrBgEFBQcDAgYIKwYBBQUHAwQwRQYDVR0fBD4wPDA6oDigNoY0 +aHR0cDovL2NybC5oYXJpY2EuZ3IvSEFSSUNBLUNsaWVudC1Sb290LTIwMjEtRUND +LmNybDAdBgNVHQ4EFgQUccTTotYuiaWImnarwbliZz8kZeswDgYDVR0PAQH/BAQD +AgGGMAoGCCqGSM49BAMDA2kAMGYCMQDbRE4Sf4j5cdd9PlC4xjnNvJxfsDX5ouYb +3ffJ6ukmEjI7RnHm6xJ2V++40nWYVfgCMQDfXaawZpZymK4CBIMHxoViSYBHw/Mm +JOG3trrP+Q4Kb0AfJb/S2ojAD+EAKiiB5hM= +-----END CERTIFICATE----- +Subject: C=GR,O=Hellenic Academic and Research Institutions CA,CN=GEANT S/MIME RSA 1 +-----BEGIN CERTIFICATE----- +MIIGRDCCBCygAwIBAgIQFfmubKqNLtTTb3h/Htx7ATANBgkqhkiG9w0BAQsFADBv +MQswCQYDVQQGEwJHUjE3MDUGA1UECgwuSGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJl 
+c2VhcmNoIEluc3RpdHV0aW9ucyBDQTEnMCUGA1UEAwweSEFSSUNBIENsaWVudCBS +U0EgUm9vdCBDQSAyMDIxMB4XDTI1MDEwMzExMTMwOFoXDTM5MTIzMTExMTMwN1ow +YzELMAkGA1UEBhMCR1IxNzA1BgNVBAoMLkhlbGxlbmljIEFjYWRlbWljIGFuZCBS +ZXNlYXJjaCBJbnN0aXR1dGlvbnMgQ0ExGzAZBgNVBAMMEkdFQU5UIFMvTUlNRSBS +U0EgMTCCAaIwDQYJKoZIhvcNAQEBBQADggGPADCCAYoCggGBAKu4bq/+byKjHo25 +Xz32YBmO+Wrkmc+UmfcdXSCI7yawwU9JSMEHAAKAASaJpLr9JAyt+tlB/rn/Sazn +SwY4ipBIffR0D5k/ndfiI553dWgI4i/tkOGlNej/7JyE2CS9kTlOOs6pg5HaDpwq +jAhCkje+IByg5gKWH6lzvMJo5jQOtsGB2q6e5cYKwa9LJOAcR8iquds9LFssbHSM +uVdSuTjpAjcGLqWfW++C0YXpWD+UonjQ6lNEuiKUDmrFc+SEtLw56lYtp4uuxm4L +W/HQSsx+oGwMBqaR6HhBQ3LydONjsbcbegRqJZFJoLsnwIHorEag44UIvjXzYJAx +/NTiwVdHldO7cEvWscDbyQLR9koBoliq2HrgYFQs7NQxU+7MLNSh8i6znWVNISUE +g36M//I8BZl4VqD70ELlhKKN7rx+i7BwKOd2gxdWgFJhkPyQu9o+82R9epXiRblo +/rdkyv+2BFR7VpbgPUzncdi8/0h4dP/qQFYnA+Df0FFj7gYczwIDAQABo4IBZjCC +AWIwEgYDVR0TAQH/BAgwBgEB/wIBADAfBgNVHSMEGDAWgBSg1gc9XiT3e6BELiRS +DRmqKwSRpzBQBggrBgEFBQcBAQREMEIwQAYIKwYBBQUHMAKGNGh0dHA6Ly9jcnQu +aGFyaWNhLmdyL0hBUklDQS1DbGllbnQtUm9vdC0yMDIxLVJTQS5jZXIwRAYDVR0g +BD0wOzA5BgRVHSAAMDEwLwYIKwYBBQUHAgEWI2h0dHA6Ly9yZXBvLmhhcmljYS5n +ci9kb2N1bWVudHMvQ1BTMB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEFBQcDBDBF +BgNVHR8EPjA8MDqgOKA2hjRodHRwOi8vY3JsLmhhcmljYS5nci9IQVJJQ0EtQ2xp +ZW50LVJvb3QtMjAyMS1SU0EuY3JsMB0GA1UdDgQWBBTrsi87/a4CzCpEBl0lzR0S +ImiwRzAOBgNVHQ8BAf8EBAMCAYYwDQYJKoZIhvcNAQELBQADggIBADveuEX23Dwr +kygKtsF7DmcTGmi8SE20jmJLe0TMT8Nws1NqppE0ACym1agtY1IjUFm5MWabG/Ic +vRTh8sB9cRZgDQMqZLNCLofqL4aj/dKBXH4bwH2MVdjNHBoGvZkyhRz/kBE+x1va +WXclhWQMOX5nVvRMfiEJiYotMP7KM88IaVZ9DkGJJEVftsnUWuvCWUtjagD6XWlq +LHjNl+LufiZ/h9lDvaWqG1/obfdStgofMc30RL+ES6gYKRwZpCA1coFzXV7Cnwx8 +toTl8bReqCNXexKzxlqAcRXPOmlKkJQuqRI297oNuMPnoNZCY+yLnxyd4kZuu0Xc +OTNTpVjM8bvg8ACqhSYanrNDi/zTiTk7gwm9GyH1X45fFNGNEFgpIaApjT2UELuk +DOmP18ZwC4EQeHawPJIqffMEmUJm6qbRPKGnNmcyygh4iZU3QbkRLLp3Z6QV3WoT +Eqyf5mL9qTGS6WJG65L8oaKw1Xh/bdGuVIDyBahpfP2c2pCd0UH6+x73Rrq9GFlO +ijVr2OQSvKhzETNG917SvcURCBhMnIQFUXqHQyIY60eH1po6WtNOq/1K5kpOG6Sq 
+1RVc02LEit48uK4tRMVUKekSOjruGXW38DmAriPcMHjI6VQbqjc0Sq1VPz76ee4F +M5uLviSUZHYqDDqMWa8LFImK9iiKI8E3 +-----END CERTIFICATE----- +Subject: C=GR,O=Hellenic Academic and Research Institutions CA,CN=GEANT TLS ECC 1 +-----BEGIN CERTIFICATE----- +MIIDNzCCArygAwIBAgIQQv3c4SYWB+Gl5pNaQAFh3TAKBggqhkjOPQQDAzBsMQsw +CQYDVQQGEwJHUjE3MDUGA1UECgwuSGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJlc2Vh +cmNoIEluc3RpdHV0aW9ucyBDQTEkMCIGA1UEAwwbSEFSSUNBIFRMUyBFQ0MgUm9v +dCBDQSAyMDIxMB4XDTI1MDEwMzExMTQyMVoXDTM5MTIzMTExMTQyMFowYDELMAkG +A1UEBhMCR1IxNzA1BgNVBAoMLkhlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJj +aCBJbnN0aXR1dGlvbnMgQ0ExGDAWBgNVBAMMD0dFQU5UIFRMUyBFQ0MgMTB2MBAG +ByqGSM49AgEGBSuBBAAiA2IABANPWLwh0Za2UqtbLV7/qNRm78zsttgSuvhn73bU +GtxETsVOEZeMUfMjgHw8EwrsSJI9oj0CgZQFFSEY1NJfcxA/NJiOYJUKPsFbpOrY +dr0q4g+aBZsXWeh7bMCzx24g/aOCAS0wggEpMBIGA1UdEwEB/wQIMAYBAf8CAQAw +HwYDVR0jBBgwFoAUyRtTgRL+BNUW0aq8mm+3oJUZbsowTQYIKwYBBQUHAQEEQTA/ +MD0GCCsGAQUFBzAChjFodHRwOi8vY3J0LmhhcmljYS5nci9IQVJJQ0EtVExTLVJv +b3QtMjAyMS1FQ0MuY2VyMBEGA1UdIAQKMAgwBgYEVR0gADAdBgNVHSUEFjAUBggr +BgEFBQcDAgYIKwYBBQUHAwEwQgYDVR0fBDswOTA3oDWgM4YxaHR0cDovL2NybC5o +YXJpY2EuZ3IvSEFSSUNBLVRMUy1Sb290LTIwMjEtRUNDLmNybDAdBgNVHQ4EFgQU +6ZkGjRcfq/uWGlrIW15dXuzanI8wDgYDVR0PAQH/BAQDAgGGMAoGCCqGSM49BAMD +A2kAMGYCMQD2M1caaY2OwmthgmANUQg3LBLI0/2LiCdxa2zNq0G59wVzbjEk0cR/ +px52OegIwRACMQCk+iTmBlR6Xfv6igiiaFiPYfN2HfbcYLWbot5DZ2H1b4JVJV+V +rga7uu50SDG9hf4= +-----END CERTIFICATE----- +Subject: C=GR,O=Hellenic Academic and Research Institutions CA,CN=GEANT TLS RSA 1 +-----BEGIN CERTIFICATE----- +MIIGBTCCA+2gAwIBAgIQFNV782kiKCGaVWf6kWUbIjANBgkqhkiG9w0BAQsFADBs +MQswCQYDVQQGEwJHUjE3MDUGA1UECgwuSGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJl +c2VhcmNoIEluc3RpdHV0aW9ucyBDQTEkMCIGA1UEAwwbSEFSSUNBIFRMUyBSU0Eg +Um9vdCBDQSAyMDIxMB4XDTI1MDEwMzExMTUwMFoXDTM5MTIzMTExMTQ1OVowYDEL +MAkGA1UEBhMCR1IxNzA1BgNVBAoMLkhlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNl +YXJjaCBJbnN0aXR1dGlvbnMgQ0ExGDAWBgNVBAMMD0dFQU5UIFRMUyBSU0EgMTCC +AaIwDQYJKoZIhvcNAQEBBQADggGPADCCAYoCggGBAKEEaZSzEzznAPk8IEa17GSG 
+yJzPTj4cwRY7/vcq2BPT5+IRGxQtaCdgLXIEl2cdPdIkj2eyakFmgMjAtyeju8V8 +dRayQCD/bWjJ7thDlowgLljQaXirxnYbT8bzRHAhCZqBakYgi5KWw9dANLyDHGpX +UdY259ab0lWEaFE5Uu6IzQSMJOAy4l/Twym8GUiy0qMDEBFSlm31C9BXpdHKKAlh +vIjMiKoDeTWl5vZaLB2MMRGY1yW2ftPgIP0/MkX1uFITlvHmmMTngxplH1nybEIJ +FiwHg1KiLk1TprcZgeO2gxE5Lz3wTFWrsUlAzrh5xWmscWkjNi/4BpeuiT5+NExF +czboLnXOfjuci/7bsnPi1/aZN/iKNbJRnngFoLaKVMmqCS7Xo34f+BITatryQZFE +u2oDKExQGlxDBCfYMLgLucX/onpLzUSgeQITNLx6i5tGGbUYH+9Dy3GI66L/5tPj +qzlOsydki8ZYGE5SBJeWCZ2IrhUe0WzZ2b6Zhk6JAQIDAQABo4IBLTCCASkwEgYD +VR0TAQH/BAgwBgEB/wIBADAfBgNVHSMEGDAWgBQKSCOmYKSSCjPqk1vFV+olTb0S +7jBNBggrBgEFBQcBAQRBMD8wPQYIKwYBBQUHMAKGMWh0dHA6Ly9jcnQuaGFyaWNh +LmdyL0hBUklDQS1UTFMtUm9vdC0yMDIxLVJTQS5jZXIwEQYDVR0gBAowCDAGBgRV +HSAAMB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEFBQcDATBCBgNVHR8EOzA5MDeg +NaAzhjFodHRwOi8vY3JsLmhhcmljYS5nci9IQVJJQ0EtVExTLVJvb3QtMjAyMS1S +U0EuY3JsMB0GA1UdDgQWBBSGAXI/jKlw4jEGUxbOAV9becg8OzAOBgNVHQ8BAf8E +BAMCAYYwDQYJKoZIhvcNAQELBQADggIBABkssjQzYrOo4GMsKegaChP16yNe6Sck +cWBymM455R2rMeuQ3zlxUNOEt+KUfgueOA2urp4j6TlPbs/XxpwuN3I1f09Luk5b ++ZgRXM7obE6ZLTerVQWKoTShyl34R2XlK8pEy7+67Ht4lcJzt+K6K5gEuoPSGQDP +ef+fUfmXrFcgBMcMbtfDb9dubFKNZZxo5nAXiqhFMOIyByag3H+tOTuH8zuId9pH +RDsUpAIHJ9/W2WBfLcKav7IKRlNBRD/sPBy903J9WHPKwl8kQSDA+aa7XCYk7bJt +Eyf+7GM9F5cZ7+YyknXqnv/rtQEkTKZdQo5Us18VFe9qqj94tXbLdk7PejJYNB4O +Zlli44Ld7rtqfFlUych7gIxFOmiyxMQQYrYmUi+74lEZvfoNhuref0CupuKpz6O3 +dLv6kO9T10uNdDBoBQTkge3UzHafTIe3R2o3ujXKUGPwyc9m7/FETyKLUCwSU/5O +AVOeBCU8QtkKKjM8AmbpKpe3pHWcyq3R7B3LmIALkMPTydyDfxen65IDqREbVq8N +xjhkJThUz40JqOlN6uqKqeDISj/IoucYwsqW24AlO7ZzNmohQmMi8ep23H4hBSh0 +GBTe2XvkuzaNf92syK8l2HzO+13GLCjzYLTPvXTO9UpK8DGyfGZOuamuwbAnbNpE +3RfjV9IaUQGJ +-----END CERTIFICATE----- +Subject: C=NL,O=GEANT Vereniging,CN=GEANT eScience Personal CA 4 +-----BEGIN CERTIFICATE----- +MIIG8DCCBNigAwIBAgIRAKoycu7aGxmmN/byVir07vEwDQYJKoZIhvcNAQEMBQAw +gYgxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpOZXcgSmVyc2V5MRQwEgYDVQQHEwtK +ZXJzZXkgQ2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMS4wLAYD 
+VQQDEyVVU0VSVHJ1c3QgUlNBIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTIw +MDIxODAwMDAwMFoXDTMzMDUwMTIzNTk1OVowTzELMAkGA1UEBhMCTkwxGTAXBgNV +BAoTEEdFQU5UIFZlcmVuaWdpbmcxJTAjBgNVBAMTHEdFQU5UIGVTY2llbmNlIFBl +cnNvbmFsIENBIDQwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCVokk9 +uR1UAJRcNg1NTatsuxEwya7FpTRm5wcbSJqSLeUd5gjI2zHzwjaevHwCjN716blp +iyvEVr96aGw2y+FrVo/0yr0+/XoK8WdkB64j3KmOdUttTTXERY1T1vXxvkZhchHl +JvD/VVDIt/V/xg2iDpAa2N3SW0SNSyuAGqtVaCTM4eOEgVT1Nyg5VtQJVLujMrvv +cW6+1W1rLR2O05FxQtINVnrblTKWotAitEv8yMe9qQr1Fz4sdyVj2cBwEk/zQLok +JRCSw981Hoh5kwt4AVMSxjfeSRjx0Wt0C6ioRJ8WAfSWGNDZwnUWZ4nJrW2UWtdJ +Z59t2VByBR3e8MlVTVE4pirg3R5lCCIzPhNLQm25Rap/a4m3+e9A+Jnjgfi3XzSB +fd+gcMh1xBXK+YOOfbnZW4H8T1Ty9I/HKsUp/isLV8TJsFOLZCyuV14qVfi8jmRU +0wYIK4vQRj2M7VxCyH4MPn6lgnyecIieL0b4gFNWhE2waH2gfigvpQWH6bQuSWIh +l0PaRDWFavo1SNTFKdAVQDK7w3Iw3XzOhQnjHgU/idvqd5eaqa2G4VN1vV1pNirC +LXJccKK49zDSS1IFMX9iQC4YFxc/BbWRndI3smg05dcYn7Di73B0EzFFO6dyg0WD +r2N9G/8fgjv89biSARMNzjtg2XR2cLxdT8PAYwIDAQABo4IBizCCAYcwHwYDVR0j +BBgwFoAUU3m/WqorSs9UgOHYm8Cd8rIDZsswHQYDVR0OBBYEFLYvVVqwyWAZ788J +WtHxE51sjMkEMA4GA1UdDwEB/wQEAwIBhjASBgNVHRMBAf8ECDAGAQH/AgEAMB0G +A1UdJQQWMBQGCCsGAQUFBwMCBggrBgEFBQcDBDA4BgNVHSAEMTAvMC0GBFUdIAAw +JTAjBggrBgEFBQcCARYXaHR0cHM6Ly9zZWN0aWdvLmNvbS9DUFMwUAYDVR0fBEkw +RzBFoEOgQYY/aHR0cDovL2NybC51c2VydHJ1c3QuY29tL1VTRVJUcnVzdFJTQUNl +cnRpZmljYXRpb25BdXRob3JpdHkuY3JsMHYGCCsGAQUFBwEBBGowaDA/BggrBgEF +BQcwAoYzaHR0cDovL2NydC51c2VydHJ1c3QuY29tL1VTRVJUcnVzdFJTQUFkZFRy +dXN0Q0EuY3J0MCUGCCsGAQUFBzABhhlodHRwOi8vb2NzcC51c2VydHJ1c3QuY29t +MA0GCSqGSIb3DQEBDAUAA4ICAQB7IGXk1vGM4J73d8UD3f71UqLxom42Icu/IP0V +nrIRzbAmz+C++7Bir4mqkKWt89dd9ZlIK8Ez5Y97tDEEd2eICo6dTvy5JOxiYsWK +iubpyqmW/K0xjUqutANMxxGXQOJn13RnI9OHSxNXng5OoM66O5Eq08vXdcQbXF3i +qPf95g94OJVoyFwlmYiIMuV6cOCVNShbGKvFPpNB4p+7vr2FJ7ZeHEmiZKMXO8ex +3Uq5j1riVT/4tCemFz7dSpaCkHJ0xj5Ayknj14+t/lwF7IcOdP9/15uDp8HkgNh9 +xXkFqF3wtLkidVvmmbvDmOQiLJL6Hj6MxwEwI7Cf4ZN/UH0eIra7tEgMP+mkdNtR +K3tB4lQNLquWBr25PbzMiK5LfXPGL+odDAb/7mE1ClQtg28gohf+Ms9hQ4y2rZSG 
+sYUgh+U++Yn+HlYKI26tl7qB6lFcQb6Prc1SELAEFob8yMFkZ3UnXXLRUkLeM50K +WV9s5wu4Rn6Rzp722wdTQoBeaSxZ5bjmT3QerGV2GaRmuYjLxdYGZ5Co07DI+qEJ +Tj4mbpHx/OiqD7tR9i3rHpPyTjpD6MhJ3U4EjZfd6UMWGMz3Zcw0tvJe1cLpNRX8 +yjTdSxF1/H7ni7/IHkK4trIBuc58YPbZXAfYTcPugPwQE6N7RcfQHA4McFX91SZa +DskXUg== +-----END CERTIFICATE----- +Subject: C=NL,O=GEANT Vereniging,CN=GEANT eScience Personal ECC CA 4 +-----BEGIN CERTIFICATE----- +MIIDiDCCAw6gAwIBAgIRAPFVZjGnycu/NlCaWpIPalkwCgYIKoZIzj0EAwMwgYgx +CzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpOZXcgSmVyc2V5MRQwEgYDVQQHEwtKZXJz +ZXkgQ2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMS4wLAYDVQQD +EyVVU0VSVHJ1c3QgRUNDIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTIwMDIx +ODAwMDAwMFoXDTMzMDUwMTIzNTk1OVowUzELMAkGA1UEBhMCTkwxGTAXBgNVBAoT +EEdFQU5UIFZlcmVuaWdpbmcxKTAnBgNVBAMTIEdFQU5UIGVTY2llbmNlIFBlcnNv +bmFsIEVDQyBDQSA0MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE3Oad56Xl15SY +nK3MmKcZUEu17k4jApQvPwTnGUqFxMbgDvAtxJtWqbKk8qvCOcZ/oCyrHkloS6Nf +AiTDTV5bZ6OCAYswggGHMB8GA1UdIwQYMBaAFDrhCYbUzxnClnZ0SXbc4DXGY2Oa +MB0GA1UdDgQWBBTt5lFk6mex24xLv7WAJ5g/2CMeZTAOBgNVHQ8BAf8EBAMCAYYw +EgYDVR0TAQH/BAgwBgEB/wIBADAdBgNVHSUEFjAUBggrBgEFBQcDAgYIKwYBBQUH +AwQwOAYDVR0gBDEwLzAtBgRVHSAAMCUwIwYIKwYBBQUHAgEWF2h0dHBzOi8vc2Vj +dGlnby5jb20vQ1BTMFAGA1UdHwRJMEcwRaBDoEGGP2h0dHA6Ly9jcmwudXNlcnRy +dXN0LmNvbS9VU0VSVHJ1c3RFQ0NDZXJ0aWZpY2F0aW9uQXV0aG9yaXR5LmNybDB2 +BggrBgEFBQcBAQRqMGgwPwYIKwYBBQUHMAKGM2h0dHA6Ly9jcnQudXNlcnRydXN0 +LmNvbS9VU0VSVHJ1c3RFQ0NBZGRUcnVzdENBLmNydDAlBggrBgEFBQcwAYYZaHR0 +cDovL29jc3AudXNlcnRydXN0LmNvbTAKBggqhkjOPQQDAwNoADBlAjA8KoDpeqid +slDXmfwHD7kr0XTY8rOdQBWMzT5uU7nPROEYLK00Dc9w/J4M8CGaLX8CMQDoaX4P +os4y0yfmvRAPaFZxyJi1ZHaZh+G0dX7ggOEyMHmT0P57T6TjdfcBr1G/J/M= +-----END CERTIFICATE----- +Subject: C=GR,O=Hellenic Academic and Research Institutions CA,CN=HARICA OV TLS ECC +-----BEGIN CERTIFICATE----- +MIIDcjCCAvigAwIBAgIQbIPKxKaS8zQphK9yBQyPDDAKBggqhkjOPQQDAzBsMQsw +CQYDVQQGEwJHUjE3MDUGA1UECgwuSGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJlc2Vh +cmNoIEluc3RpdHV0aW9ucyBDQTEkMCIGA1UEAwwbSEFSSUNBIFRMUyBFQ0MgUm9v 
+dCBDQSAyMDIxMB4XDTIxMDMxOTA5MzM1MloXDTM2MDMxNTA5MzM1MVowYjELMAkG +A1UEBhMCR1IxNzA1BgNVBAoMLkhlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJj +aCBJbnN0aXR1dGlvbnMgQ0ExGjAYBgNVBAMMEUhBUklDQSBPViBUTFMgRUNDMHYw +EAYHKoZIzj0CAQYFK4EEACIDYgAECLBRkImpVT10VjeoeRvNhOsxxQe+XVQO37r8 +PPKVlSWR/+78tD+FMEUTLGGjVUd4+KFCM2Yc2V3fZyoQIaxUzbhUYwIavuaF0V9l +t0cfPISZ1rcQEHn3yAr1ON1eoT3jo4IBZzCCAWMwEgYDVR0TAQH/BAgwBgEB/wIB +ADAfBgNVHSMEGDAWgBTJG1OBEv4E1RbRqryab7eglRluyjBUBggrBgEFBQcBAQRI +MEYwRAYIKwYBBQUHMAKGOGh0dHA6Ly9yZXBvLmhhcmljYS5nci9jZXJ0cy9IQVJJ +Q0EtVExTLVJvb3QtMjAyMS1FQ0MuY2VyMEQGA1UdIAQ9MDswOQYEVR0gADAxMC8G +CCsGAQUFBwIBFiNodHRwOi8vcmVwby5oYXJpY2EuZ3IvZG9jdW1lbnRzL0NQUzAd +BgNVHSUEFjAUBggrBgEFBQcDAgYIKwYBBQUHAwEwQgYDVR0fBDswOTA3oDWgM4Yx +aHR0cDovL2NybC5oYXJpY2EuZ3IvSEFSSUNBLVRMUy1Sb290LTIwMjEtRUNDLmNy +bDAdBgNVHQ4EFgQUvrSdrMbKJ79Ox9kcg/5aTh6XB58wDgYDVR0PAQH/BAQDAgGG +MAoGCCqGSM49BAMDA2gAMGUCMQCBJIThQHLwid4SHT+YoWXd7tEFwKf6OsIX+M4U +fh2/UAp8bCiB7D/lcAvFj9YPajcCME5DsmcLbYE7D44HlLoqVcr7RDdh84nG6Dsp +8+YS3BnKAIONAWeGq4jawr3lD667Mw== +-----END CERTIFICATE----- +Subject: C=GR,O=Hellenic Academic and Research Institutions CA,CN=HARICA OV TLS RSA +-----BEGIN CERTIFICATE----- +MIIGwTCCBKmgAwIBAgIQHEYUQ2gTTV1QnQ3HytsfuDANBgkqhkiG9w0BAQsFADBs +MQswCQYDVQQGEwJHUjE3MDUGA1UECgwuSGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJl +c2VhcmNoIEluc3RpdHV0aW9ucyBDQTEkMCIGA1UEAwwbSEFSSUNBIFRMUyBSU0Eg +Um9vdCBDQSAyMDIxMB4XDTIxMDMxOTA5MzQxN1oXDTM2MDMxNTA5MzQxNlowYjEL +MAkGA1UEBhMCR1IxNzA1BgNVBAoMLkhlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNl +YXJjaCBJbnN0aXR1dGlvbnMgQ0ExGjAYBgNVBAMMEUhBUklDQSBPViBUTFMgUlNB +MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxBjrEt4NmTflA4DbZjPC +HIHeSKt4GSvZrb8Wr9K/xCe/US+tRc7lMvp0SJSxtfEbb9tNo5I99YYegbgT2JOo +8E/d7fblR/x25dHORtfe2xAO8liK8aswTmb69QXIP81zNnC7juFWgnfDZg8Loz7m +O8qcKeYS7pejngD4YeiVTf3j2jyJ5BI4Y24IWwcO9Er1hW282He04eQTa9z3Ta9K +00jG7foADE9q9ZOgPAiNV3tDzsaGC5HftgdmJurC/t+SD7wTLNSOGeCBf0g3jIhE +QW8FAZYp4OpytgVk8yEtos0izSIXqJn9AMG/HH21EO2tb6S09+8NZEFGh/+GoGuD 
+vDW2Nhw6FZm/yctI6nvJI4t2du39gyXql15w6ENucbM8nAyn3rcx6sHAjjgGZw98 +PRnaXXj3lMWvRSpWbBBrV0cj7eJaVFhDR+OG/2jPi8xqXaFaFGvInGW43HpQDAE5 +ATZ+8uywd1S8jA484TmyKr2XUx3OIwvpRxZDy0GvHWiqq03liS/d83vFVvZRq34H +AMKSeLpLt6XpucnwNWJyv5RvmzsTzKfL/XrwJ9tkXWWp+sqGuScZNZApKMUhS+7w +NKv84eX/YBKmtuQch6Xgbw0D8JjBk4my35qLw4wGhX8AM1gVErcotCrMwswiSTLy +XJxm/VX2p8xRWCgrS49G2eMCAwEAAaOCAWcwggFjMBIGA1UdEwEB/wQIMAYBAf8C +AQAwHwYDVR0jBBgwFoAUCkgjpmCkkgoz6pNbxVfqJU29Eu4wVAYIKwYBBQUHAQEE +SDBGMEQGCCsGAQUFBzAChjhodHRwOi8vcmVwby5oYXJpY2EuZ3IvY2VydHMvSEFS +SUNBLVRMUy1Sb290LTIwMjEtUlNBLmNlcjBEBgNVHSAEPTA7MDkGBFUdIAAwMTAv +BggrBgEFBQcCARYjaHR0cDovL3JlcG8uaGFyaWNhLmdyL2RvY3VtZW50cy9DUFMw +HQYDVR0lBBYwFAYIKwYBBQUHAwIGCCsGAQUFBwMBMEIGA1UdHwQ7MDkwN6A1oDOG +MWh0dHA6Ly9jcmwuaGFyaWNhLmdyL0hBUklDQS1UTFMtUm9vdC0yMDIxLVJTQS5j +cmwwHQYDVR0OBBYEFOCbPX01Y8XsnQdzqKeJIPRPhPujMA4GA1UdDwEB/wQEAwIB +hjANBgkqhkiG9w0BAQsFAAOCAgEAbd2n5s7VSvUO8NUXH11Ml5Pdh+W+AaWEJdwG +8qtnSX+/WL33OsJx1YN5jNjSoEWK4kswoEvrrMP7rg8w920kL5lzgoByQbP3jrJL +Pj4LnvuqIFj6lz9eMA1kgYqy5BvnepQB/smx58K/CNadvXhtxSU+PuIgoaoKexHM +9AWAMIIKFkE/SbYd8lR2mzp5rgeSD+rYExOxvpG/f713fDFRTH+SyqMREw34d2rD +dtSBK5GerrV1F7C//KGM016EWCz59kutui8qyZJNq7dw1BChbEo1ho9ekN5nQ8t/ +ckF7lJkrMoYRyZobJbQs45BfDMXyRFT4u4N1Z+2GyEvgryqlIQfQ3SUamELnQ6Ta +3oia4pLt/SYrRJOJ3I1EhlMgmi9dS/vCiXcDoMqConepk3/gDFtf8NatbQk2+vBW +mkTcSxlAktWFFu3iAN+0hOPQWDtXofUecFVLwNzAFMhVFw8yd9h8AuM3ThZPNSW1 +0IFer8+cEkvBy2VBbg2MtXn3Duu3NGJk9xf1e752048foDAfrRcuPTZdijtvRFJ+ +G1d/8t9mQqer39S8HONbR+1Zx6KjlFbTkq+Jaivcg6X1JapDjDG8PMiaPX8WE5Tt +jmZ8mSoDuXjqXaQ03VFrr+IweD985Ryq/eDM79dQc8SawzNwzLdNkmczMhLfE9ut +wIHJbMg= +-----END CERTIFICATE----- +Subject: C=GR,O=Hellenic Academic and Research Institutions CA,CN=HARICA S/MIME ECC +-----BEGIN CERTIFICATE----- +MIIDejCCAwGgAwIBAgIQR+QjpXN0lyzfL5Q4f0wP1DAKBggqhkjOPQQDAzBvMQsw +CQYDVQQGEwJHUjE3MDUGA1UECgwuSGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJlc2Vh +cmNoIEluc3RpdHV0aW9ucyBDQTEnMCUGA1UEAwweSEFSSUNBIENsaWVudCBFQ0Mg +Um9vdCBDQSAyMDIxMB4XDTIxMDMxOTA5MzY1OFoXDTM2MDMxNTA5MzY1N1owYjEL 
+MAkGA1UEBhMCR1IxNzA1BgNVBAoMLkhlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNl +YXJjaCBJbnN0aXR1dGlvbnMgQ0ExGjAYBgNVBAMMEUhBUklDQSBTL01JTUUgRUND +MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEbidoUrQSTETj3yKvZfP8hz4cjWJYLu2S +BQVAorbEw3a9bbiaYGSreAJJamuTKI/8EiGjOZ3gAxlWZirpKofebc+lqDa87zjh +E88mIhT+WYBu4S/0+lb0r59quKF8Y0pHo4IBbTCCAWkwEgYDVR0TAQH/BAgwBgEB +/wIBADAfBgNVHSMEGDAWgBRSCNK+MoEl/fUal+xOXxq7U82QrTBXBggrBgEFBQcB +AQRLMEkwRwYIKwYBBQUHMAKGO2h0dHA6Ly9yZXBvLmhhcmljYS5nci9jZXJ0cy9I +QVJJQ0EtQ2xpZW50LVJvb3QtMjAyMS1FQ0MuY2VyMEQGA1UdIAQ9MDswOQYEVR0g +ADAxMC8GCCsGAQUFBwIBFiNodHRwOi8vcmVwby5oYXJpY2EuZ3IvZG9jdW1lbnRz +L0NQUzAdBgNVHSUEFjAUBggrBgEFBQcDAgYIKwYBBQUHAwQwRQYDVR0fBD4wPDA6 +oDigNoY0aHR0cDovL2NybC5oYXJpY2EuZ3IvSEFSSUNBLUNsaWVudC1Sb290LTIw +MjEtRUNDLmNybDAdBgNVHQ4EFgQUTq8v2Rh752P2xakBhmVdLF4791YwDgYDVR0P +AQH/BAQDAgGGMAoGCCqGSM49BAMDA2cAMGQCMEayoDfZOkZgvT13XbjuSpKc2m/C +cEQqYDwGIyXBhZqyMoMyDcthQsiEwBW3lHT5IQIwIT2kzroVW2iWhRF3vaTEAo3m ++AosNW84YGle0MMG2SDIoQEJiqhjRfQwZHHeBzCE +-----END CERTIFICATE----- +Subject: C=GR,O=Hellenic Academic and Research Institutions CA,CN=HARICA S/MIME RSA +-----BEGIN CERTIFICATE----- +MIIGyjCCBLKgAwIBAgIQKU8NCxll7jtXyXGT7O5U0zANBgkqhkiG9w0BAQsFADBv +MQswCQYDVQQGEwJHUjE3MDUGA1UECgwuSGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJl +c2VhcmNoIEluc3RpdHV0aW9ucyBDQTEnMCUGA1UEAwweSEFSSUNBIENsaWVudCBS +U0EgUm9vdCBDQSAyMDIxMB4XDTIxMDMxOTA5MzczOFoXDTM2MDMxNTA5MzczN1ow +YjELMAkGA1UEBhMCR1IxNzA1BgNVBAoMLkhlbGxlbmljIEFjYWRlbWljIGFuZCBS +ZXNlYXJjaCBJbnN0aXR1dGlvbnMgQ0ExGjAYBgNVBAMMEUhBUklDQSBTL01JTUUg +UlNBMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA5TUrmNU21Y8gpvrx +XCCOJS54uCyv6H6HQ1NCCz2FCg04VaNEuRYFGSXbxC88u/Q3M/MfegErs6MlLg5j +gaAD7UYRpFMs/rS6Z6Pr45LDaWHTsS7gjhYaq47+iO0+17eTc7VWeTFzZji3ocFA +CiXc+tEDRXSGal+qu4s6/Zmexmtp3P8QodrLgy6mAXO7necfkCoyBVjUcixIvPeR +1NeFWfy2io7QU+Wf4Wm18259W+oJgxH0mgfFKgGrz/c2BokxzdXFvuKggVWEh/p0 +jy8uRQOGcMRT2fyUh5LM6zRBGObE8xIiRfforJF2CGSu1B0wj0jCmFG8Z2UN55xk +EH+HKpQlm+S0mvez7YYUqMobN1RjTWHSZQoX2nuYk0no/cxbUl/AVXE0fq3DkNv4 
+fdTGfLkTJ42qdyAIpjEjkLgrb4C4CT2IxPuNCLCE25SnVmydA7CuMmbwfljWwkE4 +JyO23QL90baDIcEg/NYp/IueW9qu8YwCX5ipJhflzqnjONFKDlC93FnoQ/i/ALIk +MOhjCrHePX3/F7OWXWAoIRdbqoCNvN4KnJfcFJCcPDOGY7qds5cMcII3uKhhsRCQ +Jg2KKGvVdwp1oBIibuVM+cIZi3QucK8TfssODws1WNce5RV+Gniv5U4dhS+5nPvC +gQr6a8wL4ha8hc8ovXEhrrBHtOMCAwEAAaOCAW0wggFpMBIGA1UdEwEB/wQIMAYB +Af8CAQAwHwYDVR0jBBgwFoAUoNYHPV4k93ugRC4kUg0ZqisEkacwVwYIKwYBBQUH +AQEESzBJMEcGCCsGAQUFBzAChjtodHRwOi8vcmVwby5oYXJpY2EuZ3IvY2VydHMv +SEFSSUNBLUNsaWVudC1Sb290LTIwMjEtUlNBLmNlcjBEBgNVHSAEPTA7MDkGBFUd +IAAwMTAvBggrBgEFBQcCARYjaHR0cDovL3JlcG8uaGFyaWNhLmdyL2RvY3VtZW50 +cy9DUFMwHQYDVR0lBBYwFAYIKwYBBQUHAwIGCCsGAQUFBwMEMEUGA1UdHwQ+MDww +OqA4oDaGNGh0dHA6Ly9jcmwuaGFyaWNhLmdyL0hBUklDQS1DbGllbnQtUm9vdC0y +MDIxLVJTQS5jcmwwHQYDVR0OBBYEFJX2FFz9LqSMQ6C3jgW0Orbsgg2fMA4GA1Ud +DwEB/wQEAwIBhjANBgkqhkiG9w0BAQsFAAOCAgEAL2XgP79Wk/ijkQOI55UihqPR +m68QXb1IDKYCmQiJFkqNQOf/Bt10RjtASV2kjDEQoI4cnmwB++Pd8A9kyFdWPwJ2 +bX68XvFvHX6FzejNwJ5b6wIiz1W3QlHZwhTSoV0KHh4H4gzD8N9X4HwfESVYRkQF +3tTg1VnZJI2ggCg5201X3BI6YLDc4GltF5EORVzT5xPpMiNHxDivoP8tEdijw5Jh +kpCDG8BbpbfXQ1HjPRawQ4w6w1aXgyhajLAhjZdLOfpmZO5EbYai46m/niK5mKTU +/RFupYhNsejfD5wm6JEZbisV5SXVPbmIXrTwvt66lGvgSb3X7waCFIVuwr71qc/q +YNyvXA4ALSchFU8xqWDu2StWrB3i3CzNR2DMz18yYpjeJehuuhsIZu+Ku4CML1O3 +nlKCXx3EbABVAJkXhbDqHnUw0kqPHh9enYcXdJ8zjLhHVBghdLfpOmp9ZtIOjquL +mi+OEwkhOv6bC4IXpY57c5bBzK9DCoffE1LG/JUfOSE0B6kuTVyXvFhfq7BK5LcJ +WieXlSD3COOcqBQ62eHKpRPEYOLdAHDdOq6MBHHMAVUkB/pAFgDj0twB+xP2FUUk +ycRBPSiBgQEnuI8QLrQXB3FDSXqh+I8jfUcVc7JvgvG4bLMjozgFB58A7gJ5YDce +4P4ckMdKzbAXPa5tY2Y= +-----END CERTIFICATE----- +Subject: C=GB,ST=Greater Manchester,L=Salford,O=Sectigo Limited,CN=Sectigo ECC Organization Validation Secure Server CA +-----BEGIN CERTIFICATE----- +MIIDrjCCAzOgAwIBAgIQNb50Y4yz6d4oBXC3l4CzZzAKBggqhkjOPQQDAzCBiDEL +MAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNl +eSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMT +JVVTRVJUcnVzdCBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTgxMTAy 
+MDAwMDAwWhcNMzAxMjMxMjM1OTU5WjCBlTELMAkGA1UEBhMCR0IxGzAZBgNVBAgT +EkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEYMBYGA1UEChMP +U2VjdGlnbyBMaW1pdGVkMT0wOwYDVQQDEzRTZWN0aWdvIEVDQyBPcmdhbml6YXRp +b24gVmFsaWRhdGlvbiBTZWN1cmUgU2VydmVyIENBMFkwEwYHKoZIzj0CAQYIKoZI +zj0DAQcDQgAEnI5cCmFvoVij0NXO+vxE+f+6Bh57FhpyH0LTCrJmzfsPSXIhTSex +r92HOlz+aHqoGE0vSe/CSwLFoWcZ8W1jOaOCAW4wggFqMB8GA1UdIwQYMBaAFDrh +CYbUzxnClnZ0SXbc4DXGY2OaMB0GA1UdDgQWBBRNSu/ERrMSrU9OmrFZ4lGrCBB4 +CDAOBgNVHQ8BAf8EBAMCAYYwEgYDVR0TAQH/BAgwBgEB/wIBADAdBgNVHSUEFjAU +BggrBgEFBQcDAQYIKwYBBQUHAwIwGwYDVR0gBBQwEjAGBgRVHSAAMAgGBmeBDAEC +AjBQBgNVHR8ESTBHMEWgQ6BBhj9odHRwOi8vY3JsLnVzZXJ0cnVzdC5jb20vVVNF +UlRydXN0RUNDQ2VydGlmaWNhdGlvbkF1dGhvcml0eS5jcmwwdgYIKwYBBQUHAQEE +ajBoMD8GCCsGAQUFBzAChjNodHRwOi8vY3J0LnVzZXJ0cnVzdC5jb20vVVNFUlRy +dXN0RUNDQWRkVHJ1c3RDQS5jcnQwJQYIKwYBBQUHMAGGGWh0dHA6Ly9vY3NwLnVz +ZXJ0cnVzdC5jb20wCgYIKoZIzj0EAwMDaQAwZgIxAOk//uo7i/MoeKdcyeqvjOXs +BJFGLI+1i0d+Tty7zEnn2w4DNS21TK8wmY3Kjm3EmQIxAPI1qHM/I+OS+hx0OZhG +fDoNifTe/GxgWZ1gOYQKzn6lwP0yGKlrP+7vrVC8IczJ4A== +-----END CERTIFICATE----- +Subject: C=GB,ST=Greater Manchester,L=Salford,O=Sectigo Limited,CN=Sectigo RSA Organization Validation Secure Server CA +-----BEGIN CERTIFICATE----- +MIIGGTCCBAGgAwIBAgIQE31TnKp8MamkM3AZaIR6jTANBgkqhkiG9w0BAQwFADCB +iDELMAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0pl +cnNleSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNV +BAMTJVVTRVJUcnVzdCBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTgx +MTAyMDAwMDAwWhcNMzAxMjMxMjM1OTU5WjCBlTELMAkGA1UEBhMCR0IxGzAZBgNV +BAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEYMBYGA1UE +ChMPU2VjdGlnbyBMaW1pdGVkMT0wOwYDVQQDEzRTZWN0aWdvIFJTQSBPcmdhbml6 +YXRpb24gVmFsaWRhdGlvbiBTZWN1cmUgU2VydmVyIENBMIIBIjANBgkqhkiG9w0B +AQEFAAOCAQ8AMIIBCgKCAQEAnJMCRkVKUkiS/FeN+S3qU76zLNXYqKXsW2kDwB0Q +9lkz3v4HSKjojHpnSvH1jcM3ZtAykffEnQRgxLVK4oOLp64m1F06XvjRFnG7ir1x +on3IzqJgJLBSoDpFUd54k2xiYPHkVpy3O/c8Vdjf1XoxfDV/ElFw4Sy+BKzL+k/h 
+fGVqwECn2XylY4QZ4ffK76q06Fha2ZnjJt+OErK43DOyNtoUHZZYQkBuCyKFHFEi +rsTIBkVtkuZntxkj5Ng2a4XQf8dS48+wdQHgibSov4o2TqPgbOuEQc6lL0giE5dQ +YkUeCaXMn2xXcEAG2yDoG9bzk4unMp63RBUJ16/9fAEc2wIDAQABo4IBbjCCAWow +HwYDVR0jBBgwFoAUU3m/WqorSs9UgOHYm8Cd8rIDZsswHQYDVR0OBBYEFBfZ1iUn +Z/kxwklD2TA2RIxsqU/rMA4GA1UdDwEB/wQEAwIBhjASBgNVHRMBAf8ECDAGAQH/ +AgEAMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAbBgNVHSAEFDASMAYG +BFUdIAAwCAYGZ4EMAQICMFAGA1UdHwRJMEcwRaBDoEGGP2h0dHA6Ly9jcmwudXNl +cnRydXN0LmNvbS9VU0VSVHJ1c3RSU0FDZXJ0aWZpY2F0aW9uQXV0aG9yaXR5LmNy +bDB2BggrBgEFBQcBAQRqMGgwPwYIKwYBBQUHMAKGM2h0dHA6Ly9jcnQudXNlcnRy +dXN0LmNvbS9VU0VSVHJ1c3RSU0FBZGRUcnVzdENBLmNydDAlBggrBgEFBQcwAYYZ +aHR0cDovL29jc3AudXNlcnRydXN0LmNvbTANBgkqhkiG9w0BAQwFAAOCAgEAThNA +lsnD5m5bwOO69Bfhrgkfyb/LDCUW8nNTs3Yat6tIBtbNAHwgRUNFbBZaGxNh10m6 +pAKkrOjOzi3JKnSj3N6uq9BoNviRrzwB93fVC8+Xq+uH5xWo+jBaYXEgscBDxLmP +bYox6xU2JPti1Qucj+lmveZhUZeTth2HvbC1bP6mESkGYTQxMD0gJ3NR0N6Fg9N3 +OSBGltqnxloWJ4Wyz04PToxcvr44APhL+XJ71PJ616IphdAEutNCLFGIUi7RPSRn +R+xVzBv0yjTqJsHe3cQhifa6ezIejpZehEU4z4CqN2mLYBd0FUiRnG3wTqN3yhsc +SPr5z0noX0+FCuKPkBurcEya67emP7SsXaRfz+bYipaQ908mgWB2XQ8kd5GzKjGf +FlqyXYwcKapInI5v03hAcNt37N3j0VcFcC3mSZiIBYRiBXBWdoY5TtMibx3+bfEO +s2LEPMvAhblhHrrhFYBZlAyuBbuMf1a+HNJav5fyakywxnB2sJCNwQs2uRHY1ihc +6k/+JLcYCpsM0MF8XPtpvcyiTcaQvKZN8rG61ppnW5YCUtCC+cQKXA0o4D/I+pWV +idWkvklsQLI+qGu41SWyxP7x09fn1txDAXYw+zuLXfdKiXyaNb78yvBXAfCNP6CH +MntHWpdLgtJmwsQt6j8k9Kf5qLnjatkYYaA7jBU= +-----END CERTIFICATE----- diff --git a/download/1.7.0/server_cert_root_cas.pem b/download/1.7.0/server_cert_root_cas.pem new file mode 100644 index 000000000..5e85fca81 --- /dev/null +++ b/download/1.7.0/server_cert_root_cas.pem @@ -0,0 +1,152 @@ +Subject: C=DE,O=D-Trust GmbH,CN=D-TRUST Root Class 3 CA 2 2009 +-----BEGIN CERTIFICATE----- +MIIEMzCCAxugAwIBAgIDCYPzMA0GCSqGSIb3DQEBCwUAME0xCzAJBgNVBAYTAkRF +MRUwEwYDVQQKDAxELVRydXN0IEdtYkgxJzAlBgNVBAMMHkQtVFJVU1QgUm9vdCBD +bGFzcyAzIENBIDIgMjAwOTAeFw0wOTExMDUwODM1NThaFw0yOTExMDUwODM1NTha 
+ME0xCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxELVRydXN0IEdtYkgxJzAlBgNVBAMM +HkQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgMjAwOTCCASIwDQYJKoZIhvcNAQEB +BQADggEPADCCAQoCggEBANOySs96R+91myP6Oi/WUEWJNTrGa9v+2wBoqOADER03 +UAifTUpolDWzU9GUY6cgVq/eUXjsKj3zSEhQPgrfRlWLJ23DEE0NkVJD2IfgXU42 +tSHKXzlABF9bfsyjxiupQB7ZNoTWSPOSHjRGICTBpFGOShrvUD9pXRl/RcPHAY9R +ySPocq60vFYJfxLLHLGvKZAKyVXMD9O0Gu1HNVpK7ZxzBCHQqr0ME7UAyiZsxGsM +lFqVlNpQmvH/pStmMaTJOKDfHR+4CS7zp+hnUquVH+BGPtikw8paxTGA6Eian5Rp +/hnd2HN8gcqW3o7tszIFZYQ05ub9VxC1X3a/L7AQDcUCAwEAAaOCARowggEWMA8G +A1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFP3aFMSfMN4hvR5COfyrYyNJ4PGEMA4G +A1UdDwEB/wQEAwIBBjCB0wYDVR0fBIHLMIHIMIGAoH6gfIZ6bGRhcDovL2RpcmVj +dG9yeS5kLXRydXN0Lm5ldC9DTj1ELVRSVVNUJTIwUm9vdCUyMENsYXNzJTIwMyUy +MENBJTIwMiUyMDIwMDksTz1ELVRydXN0JTIwR21iSCxDPURFP2NlcnRpZmljYXRl +cmV2b2NhdGlvbmxpc3QwQ6BBoD+GPWh0dHA6Ly93d3cuZC10cnVzdC5uZXQvY3Js +L2QtdHJ1c3Rfcm9vdF9jbGFzc18zX2NhXzJfMjAwOS5jcmwwDQYJKoZIhvcNAQEL +BQADggEBAH+X2zDI36ScfSF6gHDOFBJpiBSVYEQBrLLpME+bUMJm2H6NMLVwMeni +acfzcNsgFYbQDfC+rAF1hM5+n02/t2A7nPPKHeJeaNijnZflQGDSNiH+0LS4F9p0 +o3/U37CYAqxva2ssJSRyoWXuJVrl5jLn8t+rSfrzkGkj2wTZ51xY/GXUl77M/C4K +zCUqNQT4YJEVdT1B/yMfGchs64JTBKbkTCJNjYy6zltz7GRUUG3RnFX7acM2w4y8 +PIWmawomDeCTmGCufsYkl4phX5GOZpIJhzbNi5stPvZR1FDUWSi9g/LMKHtThm3Y +Johw1+qRzT65ysCQblrGXnRl11z+o+I= +-----END CERTIFICATE----- +Subject: C=GR,O=Hellenic Academic and Research Institutions CA,CN=HARICA TLS ECC Root CA 2021 +-----BEGIN CERTIFICATE----- +MIICVDCCAdugAwIBAgIQZ3SdjXfYO2rbIvT/WeK/zjAKBggqhkjOPQQDAzBsMQsw +CQYDVQQGEwJHUjE3MDUGA1UECgwuSGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJlc2Vh +cmNoIEluc3RpdHV0aW9ucyBDQTEkMCIGA1UEAwwbSEFSSUNBIFRMUyBFQ0MgUm9v +dCBDQSAyMDIxMB4XDTIxMDIxOTExMDExMFoXDTQ1MDIxMzExMDEwOVowbDELMAkG +A1UEBhMCR1IxNzA1BgNVBAoMLkhlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJj +aCBJbnN0aXR1dGlvbnMgQ0ExJDAiBgNVBAMMG0hBUklDQSBUTFMgRUNDIFJvb3Qg +Q0EgMjAyMTB2MBAGByqGSM49AgEGBSuBBAAiA2IABDgI/rGgltJ6rK9JOtDA4MM7 +KKrxcm1lAEeIhPyaJmuqS7psBAqIXhfyVYf8MLA04jRYVxqEU+kw2anylnTDUR9Y 
+STHMmE5gEYd103KUkE+bECUqqHgtvpBBWJAVcqeht6NCMEAwDwYDVR0TAQH/BAUw +AwEB/zAdBgNVHQ4EFgQUyRtTgRL+BNUW0aq8mm+3oJUZbsowDgYDVR0PAQH/BAQD +AgGGMAoGCCqGSM49BAMDA2cAMGQCMBHervjcToiwqfAircJRQO9gcS3ujwLEXQNw +SaSS6sUUiHCm0w2wqsosQJz76YJumgIwK0eaB8bRwoF8yguWGEEbo/QwCZ61IygN +nxS2PFOiTAZpffpskcYqSUXm7LcT4Tps +-----END CERTIFICATE----- +Subject: C=GR,O=Hellenic Academic and Research Institutions CA,CN=HARICA TLS RSA Root CA 2021 +-----BEGIN CERTIFICATE----- +MIIFpDCCA4ygAwIBAgIQOcqTHO9D88aOk8f0ZIk4fjANBgkqhkiG9w0BAQsFADBs +MQswCQYDVQQGEwJHUjE3MDUGA1UECgwuSGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJl +c2VhcmNoIEluc3RpdHV0aW9ucyBDQTEkMCIGA1UEAwwbSEFSSUNBIFRMUyBSU0Eg +Um9vdCBDQSAyMDIxMB4XDTIxMDIxOTEwNTUzOFoXDTQ1MDIxMzEwNTUzN1owbDEL +MAkGA1UEBhMCR1IxNzA1BgNVBAoMLkhlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNl +YXJjaCBJbnN0aXR1dGlvbnMgQ0ExJDAiBgNVBAMMG0hBUklDQSBUTFMgUlNBIFJv +b3QgQ0EgMjAyMTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAIvC569l +mwVnlskNJLnQDmT8zuIkGCyEf3dRywQRNrhe7Wlxp57kJQmXZ8FHws+RFjZiPTgE +4VGC/6zStGndLuwRo0Xua2s7TL+MjaQenRG56Tj5eg4MmOIjHdFOY9TnuEFE+2uv +a9of08WRiFukiZLRgeaMOVig1mlDqa2YUlhu2wr7a89o+uOkXjpFc5gH6l8Cct4M +pbOfrqkdtx2z/IpZ525yZa31MJQjB/OCFks1mJxTuy/K5FrZx40d/JiZ+yykgmvw +Kh+OC19xXFyuQnspiYHLA6OZyoieC0AJQTPb5lh6/a6ZcMBaD9YThnEvdmn8kN3b +LW7R8pv1GmuebxWMevBLKKAiOIAkbDakO/IwkfN4E8/BPzWr8R0RI7VDIp4BkrcY +AuUR0YLbFQDMYTfBKnya4dC6s1BG7oKsnTH4+yPiAwBIcKMJJnkVU2DzOFytOOqB +AGMUuTNe3QvboEUHGjMJ+E20pwKmafTCWQWIZYVWrkvL4N48fS0ayOn7H6NhStYq +E613TBoYm5EPWNgGVMWX+Ko/IIqmhaZ39qb8HOLubpQzKoNQhArlT4b4UEV4AIHr +W2jjJo3Me1xR9BQsQL4aYB16cmEdH2MtiKrOokWQCPxrvrNQKlr9qEgYRtaQQJKQ +CoReaDH46+0N0x3GfZkYVVYnZS6NRcUk7M7jAgMBAAGjQjBAMA8GA1UdEwEB/wQF +MAMBAf8wHQYDVR0OBBYEFApII6ZgpJIKM+qTW8VX6iVNvRLuMA4GA1UdDwEB/wQE +AwIBhjANBgkqhkiG9w0BAQsFAAOCAgEAPpBIqm5iFSVmewzVjIuJndftTgfvnNAU +X15QvWiWkKQUEapobQk1OUAJ2vQJLDSle1mESSmXdMgHHkdt8s4cUCbjnj1AUz/3 +f5Z2EMVGpdAgS1D0NTsY9FVqQRtHBmg8uwkIYtlfVUKqrFOFrJVWNlar5AWMxaja +H6NpvVMPxP/cyuN+8kyIhkdGGvMA9YCRotxDQpSbIPDRzbLrLFPCU3hKTwSUQZqP 
+JzLB5UkZv/HywouoCjkxKLR9YjYsTewfM7Z+d21+UPCfDtcRj88YxeMn/ibvBZ3P +zzfF0HvaO7AWhAw6k9a+F9sPPg4ZeAnHqQJyIkv3N3a6dcSFA1pj1bF1BcK5vZSt +jBWZp5N99sXzqnTPBIWUmAD04vnKJGW/4GKvyMX6ssmeVkjaef2WdhW+o45WxLM0 +/L5H9MG0qPzVMIho7suuyWPEdr6sOBjhXlzPrjoiUevRi7PzKzMHVIf6tLITe7pT +BGIBnfHAT+7hOtSLIBD6Alfm78ELt5BGnBkpjNxvoEppaZS3JGWg/6w/zgH7IS79 +aPib8qXPMThcFarmlwDB31qlpzmq6YR/PFGoOtmUW4y/Twhx5duoXNTSpv4Ao8YW +xw/ogM4cKGR0GQjTQuPOAF1/sdwTsOEFy9EgqoZ0njnnkf3/W9b3raYvAwtt41dU +63ZTGI0RmLo= +-----END CERTIFICATE----- +Subject: C=DE,O=T-Systems Enterprise Services GmbH,OU=T-Systems Trust Center,CN=T-TeleSec GlobalRoot Class 2 +-----BEGIN CERTIFICATE----- +MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUx +KzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAd +BgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNl +YyBHbG9iYWxSb290IENsYXNzIDIwHhcNMDgxMDAxMTA0MDE0WhcNMzMxMDAxMjM1 +OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnBy +aXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50 +ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDIwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCqX9obX+hzkeXaXPSi5kfl82hVYAUd +AqSzm1nzHoqvNK38DcLZSBnuaY/JIPwhqgcZ7bBcrGXHX+0CfHt8LRvWurmAwhiC +FoT6ZrAIxlQjgeTNuUk/9k9uN0goOA/FvudocP05l03Sx5iRUKrERLMjfTlH6VJi +1hKTXrcxlkIF+3anHqP1wvzpesVsqXFP6st4vGCvx9702cu+fjOlbpSD8DT6Iavq +jnKgP6TeMFvvhk1qlVtDRKgQFRzlAVfFmPHmBiiRqiDFt1MmUUOyCxGVWOHAD3bZ +wI18gfNycJ5v/hqO2V81xrJvNHy+SE/iWjnX2J14np+GPgNeGYtEotXHAgMBAAGj +QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS/ +WSA2AHmgoCJrjNXyYdK4LMuCSjANBgkqhkiG9w0BAQsFAAOCAQEAMQOiYQsfdOhy +NsZt+U2e+iKo4YFWz827n+qrkRk4r6p8FU3ztqONpfSO9kSpp+ghla0+AGIWiPAC +uvxhI+YzmzB6azZie60EI4RYZeLbK4rnJVM3YlNfvNoBYimipidx5joifsFvHZVw +IEoHNN/q/xWA5brXethbdXwFeilHfkCoMRN3zUA7tFFHei4R40cR3p1m0IvVVGb6 +g1XqfMIpiRvpb7PO4gWEyS8+eIVibslfwXhjdFjASBgMmTnrpMwatXlajRWc2BQN +9noHV8cigwUtPJslJj0Ys6lDfMjIq2SPDqO/nBudMNva0Bkuqjzx+zOAduTNrRlP +BSeOE6Fuwg== +-----END 
CERTIFICATE----- +Subject: C=US,ST=New Jersey,L=Jersey City,O=The USERTRUST Network,CN=USERTrust ECC Certification Authority +-----BEGIN CERTIFICATE----- +MIICjzCCAhWgAwIBAgIQXIuZxVqUxdJxVt7NiYDMJjAKBggqhkjOPQQDAzCBiDEL +MAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNl +eSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMT +JVVTRVJUcnVzdCBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAwMjAx +MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMCVVMxEzARBgNVBAgT +Ck5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVUaGUg +VVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBFQ0MgQ2VydGlm +aWNhdGlvbiBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQarFRaqflo +I+d61SRvU8Za2EurxtW20eZzca7dnNYMYf3boIkDuAUU7FfO7l0/4iGzzvfUinng +o4N+LZfQYcTxmdwlkWOrfzCjtHDix6EznPO/LlxTsV+zfTJ/ijTjeXmjQjBAMB0G +A1UdDgQWBBQ64QmG1M8ZwpZ2dEl23OA1xmNjmjAOBgNVHQ8BAf8EBAMCAQYwDwYD +VR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjA2Z6EWCNzklwBBHU6+4WMB +zzuqQhFkoJ2UOQIReVx7Hfpkue4WQrO/isIJxOzksU0CMQDpKmFHjFJKS04YcPbW +RNZu9YO6bVi9JNlWSOrvxKJGgYhqOkbRqZtNyWHa0V1Xahg= +-----END CERTIFICATE----- +Subject: C=US,ST=New Jersey,L=Jersey City,O=The USERTRUST Network,CN=USERTrust RSA Certification Authority +-----BEGIN CERTIFICATE----- +MIIF3jCCA8agAwIBAgIQAf1tMPyjylGoG7xkDjUDLTANBgkqhkiG9w0BAQwFADCB +iDELMAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0pl +cnNleSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNV +BAMTJVVTRVJUcnVzdCBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAw +MjAxMDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMCVVMxEzARBgNV +BAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVU +aGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBSU0EgQ2Vy +dGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK +AoICAQCAEmUXNg7D2wiz0KxXDXbtzSfTTK1Qg2HiqiBNCS1kCdzOiZ/MPans9s/B +3PHTsdZ7NygRK0faOca8Ohm0X6a9fZ2jY0K2dvKpOyuR+OJv0OwWIJAJPuLodMkY +tJHUYmTbf6MG8YgYapAiPLz+E/CHFHv25B+O1ORRxhFnRghRy4YUVD+8M/5+bJz/ 
+Fp0YvVGONaanZshyZ9shZrHUm3gDwFA66Mzw3LyeTP6vBZY1H1dat//O+T23LLb2 +VN3I5xI6Ta5MirdcmrS3ID3KfyI0rn47aGYBROcBTkZTmzNg95S+UzeQc0PzMsNT +79uq/nROacdrjGCT3sTHDN/hMq7MkztReJVni+49Vv4M0GkPGw/zJSZrM233bkf6 +c0Plfg6lZrEpfDKEY1WJxA3Bk1QwGROs0303p+tdOmw1XNtB1xLaqUkL39iAigmT +Yo61Zs8liM2EuLE/pDkP2QKe6xJMlXzzawWpXhaDzLhn4ugTncxbgtNMs+1b/97l +c6wjOy0AvzVVdAlJ2ElYGn+SNuZRkg7zJn0cTRe8yexDJtC/QV9AqURE9JnnV4ee +UB9XVKg+/XRjL7FQZQnmWEIuQxpMtPAlR1n6BB6T1CZGSlCBst6+eLf8ZxXhyVeE +Hg9j1uliutZfVS7qXMYoCAQlObgOK6nyTJccBz8NUvXt7y+CDwIDAQABo0IwQDAd +BgNVHQ4EFgQUU3m/WqorSs9UgOHYm8Cd8rIDZsswDgYDVR0PAQH/BAQDAgEGMA8G +A1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAFzUfA3P9wF9QZllDHPF +Up/L+M+ZBn8b2kMVn54CVVeWFPFSPCeHlCjtHzoBN6J2/FNQwISbxmtOuowhT6KO +VWKR82kV2LyI48SqC/3vqOlLVSoGIG1VeCkZ7l8wXEskEVX/JJpuXior7gtNn3/3 +ATiUFJVDBwn7YKnuHKsSjKCaXqeYalltiz8I+8jRRa8YFWSQEg9zKC7F4iRO/Fjs +8PRF/iKz6y+O0tlFYQXBl2+odnKPi4w2r78NBc5xjeambx9spnFixdjQg3IM8WcR +iQycE0xyNN+81XHfqnHd4blsjDwSXWXavVcStkNr/+XeTWYRUc+ZruwXtuhxkYze +Sf7dNXGiFSeUHM9h4ya7b6NnJSFd5t0dCy5oGzuCr+yDZ4XUmFF0sbmZgIn/f3gZ +XHlKYC6SQK5MNyosycdiyA5d9zZbyuAlJQG03RoHnHcAP9Dc1ew91Pq7P8yF1m9/ +qS3fuQL39ZeatTXaw2ewh0qpKJ4jjv9cJ2vhsE/zB+4ALtRZh8tSQZXq9EfX7mRB +VXyNWQKV3WKdwrnuWih0hKWbt5DHDAff9Yk2dDLWKMGwsAvgnEzDHNb842m1R0aB +L6KCq9NjRHDEjf8tM7qtj3u1cIiuPhnPQCjY/MiQu12ZIvVS5ljFH4gxQ+6IHdfG +jjxDah2nGN59PRbxYvnKkKj9 +-----END CERTIFICATE----- diff --git a/download/dsf_bpe_1_0_0.tar.gz b/download/dsf_bpe_1_0_0.tar.gz new file mode 100644 index 000000000..fbb3b74ff Binary files /dev/null and b/download/dsf_bpe_1_0_0.tar.gz differ diff --git a/download/dsf_bpe_1_1_0.tar.gz b/download/dsf_bpe_1_1_0.tar.gz new file mode 100644 index 000000000..4fa370d0e Binary files /dev/null and b/download/dsf_bpe_1_1_0.tar.gz differ diff --git a/download/dsf_bpe_1_2_0.tar.gz b/download/dsf_bpe_1_2_0.tar.gz new file mode 100644 index 000000000..10c168766 Binary files /dev/null and b/download/dsf_bpe_1_2_0.tar.gz differ diff --git a/download/dsf_bpe_1_3_0.tar.gz 
b/download/dsf_bpe_1_3_0.tar.gz new file mode 100644 index 000000000..3362f863e Binary files /dev/null and b/download/dsf_bpe_1_3_0.tar.gz differ diff --git a/download/dsf_bpe_1_3_1.tar.gz b/download/dsf_bpe_1_3_1.tar.gz new file mode 100644 index 000000000..2aa4491a7 Binary files /dev/null and b/download/dsf_bpe_1_3_1.tar.gz differ diff --git a/download/dsf_bpe_1_3_2.tar.gz b/download/dsf_bpe_1_3_2.tar.gz new file mode 100644 index 000000000..34008fd0e Binary files /dev/null and b/download/dsf_bpe_1_3_2.tar.gz differ diff --git a/download/dsf_bpe_1_4_0.tar.gz b/download/dsf_bpe_1_4_0.tar.gz new file mode 100644 index 000000000..1b84fca62 Binary files /dev/null and b/download/dsf_bpe_1_4_0.tar.gz differ diff --git a/download/dsf_bpe_1_5_0.tar.gz b/download/dsf_bpe_1_5_0.tar.gz new file mode 100644 index 000000000..60f117774 Binary files /dev/null and b/download/dsf_bpe_1_5_0.tar.gz differ diff --git a/download/dsf_bpe_1_5_1.tar.gz b/download/dsf_bpe_1_5_1.tar.gz new file mode 100644 index 000000000..eb92deee5 Binary files /dev/null and b/download/dsf_bpe_1_5_1.tar.gz differ diff --git a/download/dsf_bpe_1_5_2.tar.gz b/download/dsf_bpe_1_5_2.tar.gz new file mode 100644 index 000000000..054bf5c54 Binary files /dev/null and b/download/dsf_bpe_1_5_2.tar.gz differ diff --git a/download/dsf_bpe_1_6_0.tar.gz b/download/dsf_bpe_1_6_0.tar.gz new file mode 100644 index 000000000..15e94ff27 Binary files /dev/null and b/download/dsf_bpe_1_6_0.tar.gz differ diff --git a/download/dsf_bpe_1_7_0.tar.gz b/download/dsf_bpe_1_7_0.tar.gz new file mode 100644 index 000000000..268686d0c Binary files /dev/null and b/download/dsf_bpe_1_7_0.tar.gz differ diff --git a/download/dsf_fhir_1_0_0.tar.gz b/download/dsf_fhir_1_0_0.tar.gz new file mode 100644 index 000000000..4b4ebef6d Binary files /dev/null and b/download/dsf_fhir_1_0_0.tar.gz differ diff --git a/download/dsf_fhir_1_1_0.tar.gz b/download/dsf_fhir_1_1_0.tar.gz new file mode 100644 index 000000000..7f05e2002 Binary files /dev/null 
and b/download/dsf_fhir_1_1_0.tar.gz differ diff --git a/download/dsf_fhir_1_2_0.tar.gz b/download/dsf_fhir_1_2_0.tar.gz new file mode 100644 index 000000000..b79e705d0 Binary files /dev/null and b/download/dsf_fhir_1_2_0.tar.gz differ diff --git a/download/dsf_fhir_1_3_0.tar.gz b/download/dsf_fhir_1_3_0.tar.gz new file mode 100644 index 000000000..897f03d05 Binary files /dev/null and b/download/dsf_fhir_1_3_0.tar.gz differ diff --git a/download/dsf_fhir_1_3_1.tar.gz b/download/dsf_fhir_1_3_1.tar.gz new file mode 100644 index 000000000..263d7470d Binary files /dev/null and b/download/dsf_fhir_1_3_1.tar.gz differ diff --git a/download/dsf_fhir_1_3_2.tar.gz b/download/dsf_fhir_1_3_2.tar.gz new file mode 100644 index 000000000..9e650e9e6 Binary files /dev/null and b/download/dsf_fhir_1_3_2.tar.gz differ diff --git a/download/dsf_fhir_1_4_0.tar.gz b/download/dsf_fhir_1_4_0.tar.gz new file mode 100644 index 000000000..ef7d24b8b Binary files /dev/null and b/download/dsf_fhir_1_4_0.tar.gz differ diff --git a/download/dsf_fhir_1_5_0.tar.gz b/download/dsf_fhir_1_5_0.tar.gz new file mode 100644 index 000000000..7e9c6bc22 Binary files /dev/null and b/download/dsf_fhir_1_5_0.tar.gz differ diff --git a/download/dsf_fhir_1_5_1.tar.gz b/download/dsf_fhir_1_5_1.tar.gz new file mode 100644 index 000000000..511bab7c7 Binary files /dev/null and b/download/dsf_fhir_1_5_1.tar.gz differ diff --git a/download/dsf_fhir_1_5_2.tar.gz b/download/dsf_fhir_1_5_2.tar.gz new file mode 100644 index 000000000..de8a28ff5 Binary files /dev/null and b/download/dsf_fhir_1_5_2.tar.gz differ diff --git a/download/dsf_fhir_1_6_0.tar.gz b/download/dsf_fhir_1_6_0.tar.gz new file mode 100644 index 000000000..d77a98abf Binary files /dev/null and b/download/dsf_fhir_1_6_0.tar.gz differ diff --git a/download/dsf_fhir_1_7_0.tar.gz b/download/dsf_fhir_1_7_0.tar.gz new file mode 100644 index 000000000..1b4c75ee9 Binary files /dev/null and b/download/dsf_fhir_1_7_0.tar.gz differ diff --git a/for-you/index.html 
b/for-you/index.html new file mode 100644 index 000000000..9ea191dcd --- /dev/null +++ b/for-you/index.html @@ -0,0 +1,41 @@ + + + + + + + + + +