End-to-end workflow diagram
Foundry Datasets: Patients, Nurses, and Beds feed into the Assignments dataset.
AIP Logic Layer: consumes Assignments and applies the decision logic.
Downstream Systems:
Notifications (alerts the ops team)
Compliance Team (manual escalation)
Clinical Dashboard (real-time monitoring)
This shows how Foundry handles data orchestration while AIP Logic automates the operational workflows.
from datetime import datetime
import pandas as pd

# Ontology data definition (nurse, bed, and patient objects)
nurses = [
    {"id": "N001", "name": "Alice", "shift": "day", "availability": True, "hospital_id": "H001"},
    {"id": "N002", "name": "Bob", "shift": "night", "availability": False, "hospital_id": "H001"},
]
beds = [
    {"id": "B001", "hospital_id": "H001", "status": "available", "room": "ICU"},
    {"id": "B002", "hospital_id": "H001", "status": "occupied", "room": "General"},
]
patients = [
    {"id": "P001", "name": "John Doe", "condition": "critical", "admission_date": datetime(2025, 9, 15)},
    {"id": "P002", "name": "Jane Smith", "condition": "stable", "admission_date": datetime(2025, 9, 16)},
]

# Ontology relationship definition (nurse-patient-bed)
def build_ontology():
    ontology = {
        "nurses": pd.DataFrame(nurses),
        "beds": pd.DataFrame(beds),
        "patients": pd.DataFrame(patients),
        "relationships": [
            {"nurse_id": "N001", "patient_id": "P001", "bed_id": "B001"},
        ],
    }
    return ontology

# AIP Logic: nurse scheduling and bed assignment
def schedule_nurses_and_beds(ontology):
    critical_patients = ontology["patients"][ontology["patients"]["condition"] == "critical"]
    assignments = []
    for _, patient in critical_patients.iterrows():
        # Re-filter on every iteration so an assigned nurse or bed is never reused
        available_nurses = ontology["nurses"][ontology["nurses"]["availability"]]
        available_beds = ontology["beds"][ontology["beds"]["status"] == "available"]
        if available_nurses.empty or available_beds.empty:
            break
        nurse = available_nurses.iloc[0]
        bed = available_beds.iloc[0]
        assignments.append({
            "patient_id": patient["id"],
            "nurse_id": nurse["id"],
            "bed_id": bed["id"],
            "assignment_time": datetime.now(),
        })
        # Update nurse and bed state
        ontology["nurses"].loc[ontology["nurses"]["id"] == nurse["id"], "availability"] = False
        ontology["beds"].loc[ontology["beds"]["id"] == bed["id"], "status"] = "occupied"
    return assignments, ontology

# AI agent: real-time scheduling recommendations
def ai_agent_recommendation(ontology):
    assignments, _ = schedule_nurses_and_beds(ontology)
    recommendations = []
    for assignment in assignments:
        recommendation = (
            f"Patient {assignment['patient_id']} assigned to Nurse {assignment['nurse_id']} "
            f"and Bed {assignment['bed_id']} at {assignment['assignment_time']}"
        )
        recommendations.append(recommendation)
    return recommendations

# Run
ontology = build_ontology()
recommendations = ai_agent_recommendation(ontology)
for rec in recommendations:
    print(rec)
This report documents the design and implementation of a nurse scheduling and hospital bed assignment workflow.
The workflow was first developed locally using Python and pandas for rapid prototyping.
It was then converted into a Spark/SQL-based Foundry pipeline for production deployment, ensuring scalability, data lineage, and integration with operational dashboards.
Hospitals need automated, rule-based scheduling of nurses and bed assignments for patients, prioritizing critical cases.
The system must:
prioritize critical patients when allocating resources
match patients only with currently available nurses and beds
record each assignment with a timestamp
update nurse availability and bed status as assignments are made
from datetime import datetime
import pandas as pd

# Ontology: Nurses, Beds, Patients
nurses = [
    {"id": "N001", "name": "Alice", "shift": "day", "availability": True, "hospital_id": "H001"},
    {"id": "N002", "name": "Bob", "shift": "night", "availability": False, "hospital_id": "H001"},
]
beds = [
    {"id": "B001", "hospital_id": "H001", "status": "available", "room": "ICU"},
    {"id": "B002", "hospital_id": "H001", "status": "occupied", "room": "General"},
]
patients = [
    {"id": "P001", "name": "John Doe", "condition": "critical", "admission_date": datetime(2025, 9, 15)},
    {"id": "P002", "name": "Jane Smith", "condition": "stable", "admission_date": datetime(2025, 9, 16)},
]

# Build ontology
def build_ontology():
    return {
        "nurses": pd.DataFrame(nurses),
        "beds": pd.DataFrame(beds),
        "patients": pd.DataFrame(patients),
    }

# Scheduling logic
def schedule(ontology):
    critical_patients = ontology["patients"][ontology["patients"]["condition"] == "critical"]
    assignments = []
    for _, patient in critical_patients.iterrows():
        # Re-filter each iteration so a nurse or bed is never double-booked
        available_nurses = ontology["nurses"][ontology["nurses"]["availability"]]
        available_beds = ontology["beds"][ontology["beds"]["status"] == "available"]
        if available_nurses.empty or available_beds.empty:
            break
        nurse = available_nurses.iloc[0]
        bed = available_beds.iloc[0]
        assignments.append({
            "patient_id": patient["id"],
            "nurse_id": nurse["id"],
            "bed_id": bed["id"],
            "assignment_time": datetime.now(),
        })
        # Update states
        ontology["nurses"].loc[ontology["nurses"]["id"] == nurse["id"], "availability"] = False
        ontology["beds"].loc[ontology["beds"]["id"] == bed["id"], "status"] = "occupied"
    return assignments

# Run locally
ontology = build_ontology()
print(schedule(ontology))
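With the sample data above, the prototype prints a single assignment: P001 is the only critical patient, and N001 and B001 are the only available nurse and bed.

# Expected output (assignment_time varies per run):
# [{'patient_id': 'P001', 'nurse_id': 'N001', 'bed_id': 'B001',
#   'assignment_time': datetime.datetime(2025, ...)}]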
Foundry converts the pandas prototype into a scalable Spark/SQL pipeline built on three input datasets:
nurses_dataset: id, name, shift, availability, hospital_id
beds_dataset: id, hospital_id, status, room
patients_dataset: id, name, condition, admission_date, hospital_id (hospital_id is required for the join below)
-- Select available nurses
WITH available_nurses AS (
    SELECT id AS nurse_id, hospital_id
    FROM nurses_dataset
    WHERE availability = TRUE
),
-- Select available beds
available_beds AS (
    SELECT id AS bed_id, hospital_id
    FROM beds_dataset
    WHERE status = 'available'
),
-- Select critical patients
critical_patients AS (
    SELECT id AS patient_id, hospital_id
    FROM patients_dataset
    WHERE condition = 'critical'
),
-- Pair each patient with a single nurse and bed; without this ranking,
-- the hospital-level join would emit every nurse x bed combination per patient
candidates AS (
    SELECT
        p.patient_id,
        n.nurse_id,
        b.bed_id,
        ROW_NUMBER() OVER (
            PARTITION BY p.patient_id
            ORDER BY n.nurse_id, b.bed_id
        ) AS rn
    FROM critical_patients p
    JOIN available_nurses n ON p.hospital_id = n.hospital_id
    JOIN available_beds b ON p.hospital_id = b.hospital_id
),
assignments AS (
    SELECT
        patient_id,
        nurse_id,
        bed_id,
        CURRENT_TIMESTAMP() AS assignment_time
    FROM candidates
    WHERE rn = 1
)
-- Note: uniquely allocating each nurse and bed across patients would need
-- additional ranking logic; the pandas prototype handles it by updating state
SELECT * FROM assignments;
Pipeline Orchestration: Foundry transforms can be scheduled to run hourly or daily (a transform sketch follows the comparison table below).
Lineage: the Assignments dataset is fully auditable and traceable.
Downstream Integration: Assignments feeds the notifications, compliance-escalation, and clinical-dashboard workflows.
Aspect | Local (pandas) | Foundry (Spark/SQL) |
---|---|---|
Scale | Small datasets, prototyping | Large-scale hospital data |
Execution | Manual / script | Automated pipelines, scheduling |
Governance | None | Full lineage, versioning |
Integration | Standalone | Dashboards, AIP, notifications |
Deployment | Local environment | Production-grade Foundry pipelines |
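As a sketch of what the production transform might look like, the scheduling logic can be expressed with Foundry's transforms.api Python decorators. This is a minimal illustration rather than the deployed code: the dataset paths are hypothetical placeholders, and it assumes the patients dataset carries hospital_id.

from transforms.api import transform_df, Input, Output
from pyspark.sql import functions as F
from pyspark.sql.window import Window

@transform_df(
    Output("/hospital/datasets/assignments"),       # hypothetical output path
    patients=Input("/hospital/datasets/patients"),  # hypothetical input paths
    nurses=Input("/hospital/datasets/nurses"),
    beds=Input("/hospital/datasets/beds"),
)
def compute_assignments(patients, nurses, beds):
    # Mirror the SQL transform: filter, join on hospital_id,
    # pick one nurse/bed pair per critical patient, stamp the time.
    critical = (patients.filter(F.col("condition") == "critical")
                .select(F.col("id").alias("patient_id"), "hospital_id"))
    free_nurses = (nurses.filter(F.col("availability"))
                   .select(F.col("id").alias("nurse_id"), "hospital_id"))
    free_beds = (beds.filter(F.col("status") == "available")
                 .select(F.col("id").alias("bed_id"), "hospital_id"))
    w = Window.partitionBy("patient_id").orderBy("nurse_id", "bed_id")
    return (critical
            .join(free_nurses, "hospital_id")
            .join(free_beds, "hospital_id")
            .withColumn("rn", F.row_number().over(w))
            .filter(F.col("rn") == 1)
            .select("patient_id", "nurse_id", "bed_id")
            .withColumn("assignment_time", F.current_timestamp()))

Foundry runs this as a scheduled build, which is what provides the lineage and versioning guarantees listed in the table.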
Patients Dataset
Contains hospital admission records with patient ID, name, condition (critical/stable), and admission date.
Purpose: Identify which patients require immediate bed and nurse assignment.
Nurses Dataset
Contains staff rosters with nurse ID, name, shift type, availability, and hospital ID.
Purpose: Filter for currently available nurses in the right hospital/shift.
Beds Dataset
Contains bed inventory with bed ID, hospital ID, status (available/occupied), and room type (ICU/General).
Purpose: Filter for available beds that can be matched with patient needs.
Transform Step (Spark/SQL)
Join patients with available nurses and beds by hospital_id.
Filter for critical patients first to ensure priority assignment.
Generate an Assignments dataset containing:
patient_id
nurse_id
bed_id
assignment_time
Output Dataset: Assignments
This dataset represents the authoritative record of nurse-patient-bed mappings.
It is versioned, auditable, and fully lineage-tracked in Foundry.
Trigger
The AIP workflow is triggered whenever the Assignments dataset is updated in Foundry.
Logic Flow
Each new assignment is routed to three downstream systems (a minimal routing sketch follows):
Notifications
Compliance Team
Clinical Dashboard
This integrated workflow ensures real-time hospital operations management with automated prioritization of critical patients, full lineage over every assignment, and immediate visibility for operational teams.
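A minimal sketch of that routing step in Python; the helper functions are hypothetical stand-ins for the platform integrations (notification actions, compliance queues, dashboard feeds), not a real Foundry API.

def send_notification(message: str) -> None:
    # Hypothetical stand-in for an AIP notification action
    print(f"[NOTIFY] {message}")

def escalate_to_compliance(assignment: dict) -> None:
    # Hypothetical stand-in for a manual-escalation queue
    print(f"[COMPLIANCE] review requested for {assignment['patient_id']}")

def push_to_dashboard(assignment: dict) -> None:
    # Hypothetical stand-in for a real-time dashboard feed
    print(f"[DASHBOARD] {assignment}")

def route_assignment(assignment: dict, needs_review: bool = False) -> None:
    """Fan one Assignments row out to the three downstream systems."""
    send_notification(
        f"Patient {assignment['patient_id']} -> Nurse {assignment['nurse_id']}, "
        f"Bed {assignment['bed_id']}"
    )
    if needs_review:
        escalate_to_compliance(assignment)
    push_to_dashboard(assignment)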
Palantir's AIP (Artificial Intelligence Platform) is a program that helps enterprises build an execution-oriented AI operating system. The process moves systematically through problem definition, ontology design, workflow automation, and AI agent deployment, redesigning how an organization operates around its real data. The sections below walk through each step with examples.
The sequence problem definition → ontology design → workflow automation → AI agent deployment is what builds the execution-oriented AI operating system. For example, to address supply-chain delays you would integrate the relevant data, design an ontology over it, and deploy AI agents that support real-time decision-making. The focus is not simply adopting a technology but redesigning the organization's mindset and way of operating.
Palantir provides an end-to-end platform for data integration, analysis, and decision support.
AIP Logic is a workflow language that connects business rules with the actions of LLM-based agents.
Example: automating patient data access requests
workflow:
  name: PatientDataAccessApproval
  triggers:
    - type: user_request
      input: patient_id, requester_role
  steps:
    - name: ValidateRequester
      action: check_role
      params:
        allowed_roles: ["doctor", "nurse", "researcher"]
    - name: PHI_Check
      action: query_data_classification
      params:
        dataset: patient_records
        patient_id: $patient_id
    - name: ApprovalLogic
      action: conditional
      conditions:
        - if: requester_role in ["doctor", "nurse"] and PHI_Check == "low"
          then: auto_approve
        - if: requester_role == "researcher"
          then: route_to_compliance_team
    - name: Notify
      action: send_notification
      params:
        message: "Request for patient $patient_id processed. Status: $status"
➡️ Explanation: the workflow validates the requester's role, checks the PHI classification of the requested record, auto-approves low-sensitivity requests from doctors and nurses, routes researcher requests to the compliance team, and notifies the requester of the outcome.
Foundry defines datasets through SQL transforms or code transforms (Python, Spark).
Example: building an anomaly-detection dataset for medical costs
-- Step 1: Join patient and claims data
SELECT
p.patient_id,
p.age,
c.claim_id,
c.procedure_code,
c.amount
FROM patients p
JOIN claims c
ON p.patient_id = c.patient_id;
-- Step 2: Flag abnormal claims (amount > 2 std dev from mean)
WITH claim_stats AS (
SELECT
procedure_code,
AVG(amount) as avg_amount,
STDDEV(amount) as std_amount
FROM claims
GROUP BY procedure_code
)
SELECT
c.claim_id,
c.patient_id,
c.procedure_code,
c.amount,
CASE
WHEN c.amount > cs.avg_amount + 2 * cs.std_amount
THEN 'abnormal'
ELSE 'normal'
END as claim_flag
FROM claims c
JOIN claim_stats cs
ON c.procedure_code = cs.procedure_code;
➡️ Explanation: the claims dataset is processed first and abnormal items are tagged. The two steps can also be combined into a single transform:
-- Step 1: Join patient and claims data
WITH joined AS (
SELECT
p.patient_id,
p.age,
c.claim_id,
c.procedure_code,
c.amount
FROM patients p
JOIN claims c
ON p.patient_id = c.patient_id
)
-- Step 2: Calculate stats by procedure
, claim_stats AS (
SELECT
procedure_code,
AVG(amount) as avg_amount,
STDDEV(amount) as std_amount
FROM joined
GROUP BY procedure_code
)
-- Step 3: Label abnormal claims
SELECT
j.claim_id,
j.patient_id,
j.procedure_code,
j.amount,
CASE
WHEN j.amount > cs.avg_amount + 2 * cs.std_amount
THEN 'abnormal'
ELSE 'normal'
END as claim_flag
FROM joined j
JOIN claim_stats cs
ON j.procedure_code = cs.procedure_code;
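Because Foundry also supports Python/Spark code transforms, the combined transform above can be sketched in PySpark as well. This is a minimal illustration assuming the same claims schema (claim_id, patient_id, procedure_code, amount) and a hypothetical registered table name:

from pyspark.sql import SparkSession, functions as F

spark = SparkSession.builder.getOrCreate()
claims = spark.table("claims")  # hypothetical registered table

# Per-procedure statistics, then flag claims more than 2 std devs above the mean
stats = claims.groupBy("procedure_code").agg(
    F.avg("amount").alias("avg_amount"),
    F.stddev("amount").alias("std_amount"),
)
claims_with_flags = (
    claims.join(stats, "procedure_code")
    .withColumn(
        "claim_flag",
        F.when(
            F.col("amount") > F.col("avg_amount") + 2 * F.col("std_amount"),
            "abnormal",
        ).otherwise("normal"),
    )
)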
➡️ Output dataset: claims_with_flags. Every claim is labeled normal or abnormal. The automated review logic below runs against the claims_with_flags dataset produced in Foundry.
workflow:
  name: AbnormalClaimReview
  triggers:
    - type: dataset_update
      dataset: claims_with_flags
  steps:
    - name: FetchAbnormalClaims
      action: query_dataset
      params:
        dataset: claims_with_flags
        filter: claim_flag == "abnormal"
    - name: RiskAssessment
      action: llm_assist
      params:
        prompt: |
          Analyze the claim for potential fraud indicators.
          Consider patient age, procedure code, and claim amount.
        input: $FetchAbnormalClaims
    - name: Decision
      action: conditional
      conditions:
        - if: RiskAssessment contains "high risk"
          then: route_to_investigation_team
        - if: RiskAssessment contains "low risk"
          then: auto_approve
    - name: Notify
      action: send_notification
      params:
        message: |
          Claim $claim_id has been reviewed.
          Status: $status
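For intuition, here is a minimal Python sketch of what this workflow does; call_llm is a hypothetical stand-in for the llm_assist action, and the routing strings mirror the workflow's conditions.

import pandas as pd

def call_llm(prompt: str) -> str:
    # Hypothetical stand-in for the platform's llm_assist action
    return "low risk"

def review_abnormal_claims(claims_with_flags: pd.DataFrame) -> list:
    """Mirror of AbnormalClaimReview: assess each abnormal claim, then route it."""
    decisions = []
    abnormal = claims_with_flags[claims_with_flags["claim_flag"] == "abnormal"]
    for _, claim in abnormal.iterrows():
        assessment = call_llm(
            f"Analyze claim {claim['claim_id']} (procedure {claim['procedure_code']}, "
            f"amount {claim['amount']}) for potential fraud indicators."
        )
        status = ("route_to_investigation_team" if "high risk" in assessment
                  else "auto_approve")
        decisions.append({"claim_id": claim["claim_id"], "status": status})
    return decisions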
➡️ Explanation: the FetchAbnormalClaims step pulls only the claims flagged abnormal.
Data engineering (Foundry): generates the claims_with_flags dataset.
Automated review (AIP Logic): the workflow triggers whenever claims_with_flags is updated.
Operations/notifications: reviewed claims are routed to the investigation team or auto-approved, and a status notification is sent.
In this way, Foundry owns data-driven anomaly detection while AIP Logic owns operational automation and LLM-assisted decision-making, completing an end-to-end AI-operations pipeline.