Compare commits

...

68 Commits

Author SHA1 Message Date
Federico Scodelaro 9fb252759a chore: Better example config 2026-03-24 07:55:28 +01:00
Federico Scodelaro 3a26d2ec4c Update example_configs/stalwart.md
Co-authored-by: coderabbitai[bot] <136622811+coderabbitai[bot]@users.noreply.github.com>
2026-03-24 07:55:28 +01:00
Federico Scodelaro 86d9ea10d6 docs(stalwart): Add alias example 2026-03-24 07:55:28 +01:00
dependabot[bot] 2ad634deda build(deps): bump docker/setup-qemu-action from 3 to 4
Bumps [docker/setup-qemu-action](https://github.com/docker/setup-qemu-action) from 3 to 4.
- [Release notes](https://github.com/docker/setup-qemu-action/releases)
- [Commits](https://github.com/docker/setup-qemu-action/compare/v3...v4)

---
updated-dependencies:
- dependency-name: docker/setup-qemu-action
  dependency-version: '4'
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2026-03-04 22:13:24 +01:00
dependabot[bot] 155bda6bbf build(deps): bump actions/download-artifact from 7 to 8
Bumps [actions/download-artifact](https://github.com/actions/download-artifact) from 7 to 8.
- [Release notes](https://github.com/actions/download-artifact/releases)
- [Commits](https://github.com/actions/download-artifact/compare/v7...v8)

---
updated-dependencies:
- dependency-name: actions/download-artifact
  dependency-version: '8'
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2026-02-27 22:01:45 +01:00
dependabot[bot] 7d1593e266 build(deps): bump actions/upload-artifact from 6 to 7
Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 6 to 7.
- [Release notes](https://github.com/actions/upload-artifact/releases)
- [Commits](https://github.com/actions/upload-artifact/compare/v6...v7)

---
updated-dependencies:
- dependency-name: actions/upload-artifact
  dependency-version: '7'
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2026-02-27 18:48:41 +01:00
Sertonix 8c8df11250 cargo: Update wasm-bindgen and lettre 2026-02-24 14:17:17 +01:00
Santi Gonzalez aa1384939b example_config: fix HA for blank displayName 2026-02-19 00:15:12 +01:00
lyzstrik 6f94134fdc refactor(server): migrate to rustls 0.23 and centralize TLS logic (#1389)
This commit upgrades the TLS stack to Rustls 0.23

Key changes:
- Dependencies: Updated 'rustls' (v0.23), 'tokio-rustls' (v0.26), and 'actix-web' (v4.12.1).
- Build Fix: Configured 'rustls' to use the 'ring' provider (disabling default 'aws-lc-rs') to ensure ARMv7 compatibility.
- Refactor: Created 'server/src/tls.rs' to handle certificate loading (DRY).
- LDAP: Updated 'ldap_server.rs' to use the new TLS module and Rustls APIs.
- Healthcheck: Updated 'healthcheck.rs' to use Rustls 0.23 types.
2026-01-31 09:47:11 +01:00
Valentin Tolmer d1904a2759 readme: Add a link to TrueNAS installation guide 2026-01-31 09:42:12 +01:00
Asher Densmore-Lynn 02d92c3261 example_configs: Add Apache WebDAV 2026-01-31 09:41:53 +01:00
Michael Reid 48058540ec example_configs: Installing and Configuring LLDAP on TrueNAS 2026-01-31 09:36:18 +01:00
Copilot 618e3f3062 Fix cn attribute case-insensitive matching in LDAP equality filters (#1363) 2026-01-31 09:34:10 +01:00
jakob42 cafd3732f0 example_configs: add Continuwuity 2026-01-23 13:51:26 +01:00
dependabot[bot] 8588d4b851 build(deps): bump actions/checkout from 6.0.1 to 6.0.2
Bumps [actions/checkout](https://github.com/actions/checkout) from 6.0.1 to 6.0.2.
- [Release notes](https://github.com/actions/checkout/releases)
- [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md)
- [Commits](https://github.com/actions/checkout/compare/v6.0.1...v6.0.2)

---
updated-dependencies:
- dependency-name: actions/checkout
  dependency-version: 6.0.2
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2026-01-22 21:28:51 +01:00
Hobbabobba 2f70e2e31f example_configs: add Semaphore 2026-01-13 19:09:36 +01:00
lwintermelon a9d04b6bdf example_config: Add gerrit 2026-01-09 08:02:06 +01:00
Osi Bluber c03f3b5498 docs(bootstrap): add password_file for user configs 2026-01-07 18:45:32 +01:00
Copilot ac55dfedc4 app: Remove password length validation from login form 2026-01-06 23:37:01 +01:00
josef 62ae1d73fa app: asterisk for mail attribute when creating a user 2025-12-24 22:53:17 +01:00
Valentin Tolmer 469f35c12c cargo: Update dependencies 2025-12-24 15:33:30 +01:00
dependabot[bot] ee9fec71a5 build(deps): bump actions/upload-artifact from 4 to 6
Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 4 to 6.
- [Release notes](https://github.com/actions/upload-artifact/releases)
- [Commits](https://github.com/actions/upload-artifact/compare/v4...v6)

---
updated-dependencies:
- dependency-name: actions/upload-artifact
  dependency-version: '6'
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-12-12 23:23:32 +01:00
dependabot[bot] 9cbb0c99e2 build(deps): bump actions/download-artifact from 6 to 7
Bumps [actions/download-artifact](https://github.com/actions/download-artifact) from 6 to 7.
- [Release notes](https://github.com/actions/download-artifact/releases)
- [Commits](https://github.com/actions/download-artifact/compare/v6...v7)

---
updated-dependencies:
- dependency-name: actions/download-artifact
  dependency-version: '7'
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-12-12 23:00:34 +01:00
dependabot[bot] 81e985df48 build(deps): bump actions/cache from 4 to 5
Bumps [actions/cache](https://github.com/actions/cache) from 4 to 5.
- [Release notes](https://github.com/actions/cache/releases)
- [Changelog](https://github.com/actions/cache/blob/main/RELEASES.md)
- [Commits](https://github.com/actions/cache/compare/v4...v5)

---
updated-dependencies:
- dependency-name: actions/cache
  dependency-version: '5'
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-12-12 23:00:12 +01:00
Robert Cambridge a136a68bf4 example_configs: add sample group query to Grafana guide 2025-12-04 18:09:08 +01:00
dependabot[bot] 8f0022a9f1 build(deps): bump actions/checkout from 6.0.0 to 6.0.1
Bumps [actions/checkout](https://github.com/actions/checkout) from 6.0.0 to 6.0.1.
- [Release notes](https://github.com/actions/checkout/releases)
- [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md)
- [Commits](https://github.com/actions/checkout/compare/v6.0.0...v6.0.1)

---
updated-dependencies:
- dependency-name: actions/checkout
  dependency-version: 6.0.1
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-12-02 22:28:44 +01:00
dependabot[bot] fc7b33e4b3 build(deps): bump actions/checkout from 5.0.1 to 6.0.0
Bumps [actions/checkout](https://github.com/actions/checkout) from 5.0.1 to 6.0.0.
- [Release notes](https://github.com/actions/checkout/releases)
- [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md)
- [Commits](https://github.com/actions/checkout/compare/v5.0.1...v6.0.0)

---
updated-dependencies:
- dependency-name: actions/checkout
  dependency-version: 6.0.0
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-11-21 00:42:49 +01:00
dependabot[bot] a9b5147a30 build(deps): bump actions/download-artifact from 4 to 6
Bumps [actions/download-artifact](https://github.com/actions/download-artifact) from 4 to 6.
- [Release notes](https://github.com/actions/download-artifact/releases)
- [Commits](https://github.com/actions/download-artifact/compare/v4...v6)

---
updated-dependencies:
- dependency-name: actions/download-artifact
  dependency-version: '6'
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-11-17 23:42:22 +01:00
dependabot[bot] 4de069452f build(deps): bump actions/checkout from 5.0.0 to 5.0.1
Bumps [actions/checkout](https://github.com/actions/checkout) from 5.0.0 to 5.0.1.
- [Release notes](https://github.com/actions/checkout/releases)
- [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md)
- [Commits](https://github.com/actions/checkout/compare/v5.0.0...v5.0.1)

---
updated-dependencies:
- dependency-name: actions/checkout
  dependency-version: 5.0.1
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-11-17 22:47:42 +01:00
copilot-swe-agent[bot] e5c28a61d9 ldap: Fix LDAP base scope search to return NoSuchObject for non-existent entries
Added logic to return LdapResultCode::NoSuchObject (error 32) when a base scope
search returns no results, instead of returning Success with zero entries. This
aligns with RFC 4511 LDAP specification.
2025-11-16 15:20:06 +01:00
Valentin Tolmer c5e0441cae clippy: remove unnecessary elided lifetimes 2025-11-16 15:03:52 +01:00
Shawn Wilsher a959a50e07 server: allow specifying the healthcheck addresses
This change adds two new optional configuration options:
- `ldap_healthcheck_host` to pair with `ldap_host`
- `http_healthcheck_host` to pair with `http_host`

These both default to `localhost` to preserve the existing behavior.

Fixes #700
2025-11-16 15:03:40 +01:00
Tobias Jungel ab4389fc5f fix(bootstrap): set shopt nullglob
Set the `nullglob` option in the bootstrap script to handle cases where
no files match a glob pattern.

This prevents the following error when the folder exists without json
files:

```
/bootstrap/group-configs/*.json: jq: error: Could not open file /bootstrap/group-configs/*.json: No such file or directory
```
2025-11-09 22:35:50 +01:00
Tobias Jungel ddcbe383ab docs: Rename 'mail_alias' to 'mail-alias' in example config (#1346)
The example included an invalid character `_` for the attribute `name`

This resulted in:

```
Cannot create attribute with invalid name. Valid characters: a-z, A-Z, 0-9, and dash (-). Invalid chars found: _
```

This fixes the example by using a `-`.
2025-11-09 12:07:44 +01:00
Sören eee42502f3 docs: fix example_configs path
from ./example_configs to ../example_configs
2025-10-21 15:42:06 +02:00
thchha 660301eb5f example_configs: add initial gogs.md documentation
Gogs is the origin for common git forges so we add a documentation which
may be beneficial for other use cases where lldap should be used with.
It appears to be in mantenance mode - the current example may have to be
extended in the future.

We adapt the official documentation example configuration to integrate
lldap with the more elaborated example.
The reader may also be interested in a more simple example at
[upstream](https://github.com/gogs/gogs/blob/main/conf/auth.d/ldap_simple_auth.conf.example).
2025-10-21 00:07:46 +02:00
Nassim Bounouas 73f071ce89 docs: lldap password in docker install corrected 2025-10-18 12:44:59 +02:00
Copilot 28ef6e0c56 example_configs: mailserver,
fix outdated roundcube mounts and filters
2025-10-18 12:20:29 +02:00
Shawn Wilsher a32c8baa25 misc: improve vscode devcontainer experience
This change enables a better IDE experience in vscode by doing two
things:
1) Enables the rust-analyzer, which enables a bunch of features in
   vscode
2) Installs the needed deps for `cargo fmt` to work.
2025-10-14 11:54:48 +02:00
Copilot bf5b76269f server: Refactor config_overrides to use Option::inspect
To reduce cyclomatic complexity.
2025-10-12 20:14:20 +02:00
Hendrik Sievers c09e5c451c example_configs: update SSSD guide 2025-10-11 08:39:25 +02:00
Valentin Tolmer 1382c67de9 server: Extract configuration utilities 2025-10-10 23:28:35 +02:00
Copilot 0f8f9e1244 server: split up update_user_with_transaction 2025-10-10 09:01:52 +02:00
Webysther Sperandio 9a83e68667 app: Set a key for user/group creation buttons
That prevents them from jumping around when changing pages.
2025-10-10 00:28:11 +02:00
Copilot 3f9880ec11 server: Move LDAP search tests to their respective implementation files
Move user and group tests to their respective implementation files

User tests → core/user.rs:
- test_search_regular_user
- test_search_readonly_user
- test_search_member_of
- test_search_user_as_scope
- test_search_users
- test_pwd_changed_time_format

Group tests → core/group.rs:
- test_search_groups
- test_search_groups_by_groupid
- test_search_groups_filter
- test_search_groups_filter_2
- test_search_groups_filter_3
- test_search_group_as_scope

Tests remain in search.rs:
- DSE/schema tests
- General search logic tests
- Filter tests
- Error handling tests
- OU search tests
- Mixed user/group tests
2025-10-10 00:21:32 +02:00
Valentin Tolmer 94007aee58 readme: Add a link to the configuration guide's readme 2025-10-04 23:24:46 +02:00
Copilot 9e9d8e2ab5 graphql: split query.rs and mutation.rs into modular structures (#1311) 2025-10-04 23:09:36 +02:00
Lucas Sylvester 18edd4eb7d example_configs: update portainer group membership and filter attributes
The current descriptions is wrong, and will make portainer try to assign "group" to be a member of "group" instead of the assign the "user" to be a part of "group"
2025-10-04 22:16:00 +02:00
Jonas Resch 3cdf2241ea example_configs: Improve bootstrap.sh and documentation for use with Kubernetes (#1245) 2025-09-28 14:02:06 +02:00
thchha 9021066507 example_configs: Add configuration example for Open WebUI
This documents a working (LDAPS) configuration for using lldap in Open WebUI.

Environment Variables where directly taken from the logs.
The names of the GUI variables are taken from the UI.
Version v0.6.26.

The two configuration options are then put in a table and a small
elaboration + example values are provided.

Other then additionally mounting the ca chain into the container (with appropriate rights) there were not additional steps required.
The ownership of the ca chain will get changed to `chown 501:`.
2025-09-28 13:55:29 +02:00
Copilot fe063272bf chore: add Nix flake-based development environment
Co-authored-by: Kumpelinus <kumpelinus@jat.de>

- Add Nix flake and lockfile for reproducible development environments
- Document Nix-based setup in `docs/nix-development.md`
- Add `.envrc` for direnv integration and update `.gitignore` for Nix/direnv artifacts
- Reference Nix setup in CONTRIBUTING.md
2025-09-28 13:51:41 +02:00
RealSpinelle 59dee0115d example_configs: add missing fields to authentik example 2025-09-24 16:03:56 +02:00
Valentin Tolmer 622274cb1a chore: fix codecov config 2025-09-22 09:34:37 +02:00
Valentin Tolmer 4bad3a9e69 chore: reduce codecov verbosity 2025-09-22 01:01:00 +02:00
Copilot 84fb9b0fd2 Fix pwdChangedTime format to use LDAP GeneralizedTime instead of RFC3339 (#1300)
When querying for pwdChangedTime, the timestamp is returned in RFC3339 format instead of the expected LDAP GeneralizedTime format (YYYYMMDDHHMMSSZ). This causes issues when LLDAP is used with systems like Keycloak that expect proper LDAP timestamp formatting.
2025-09-22 00:42:51 +02:00
Valentin Tolmer 8a803bfb11 ldap: normalize base DN in LdapInfo, reduce memory usage
By making it a &'static, we can have a single allocation for all the threads/async contexts.

This also normalizes the whitespace from the user input; a trailing \n can cause weird issues with clients
2025-09-17 01:03:19 +02:00
Valentin Tolmer f7fe0c6ea0 ldap: fix swapped filter conditions 2025-09-16 14:58:46 +02:00
Valentin Tolmer 8f04843466 ldap: Simplify boolean expressions derived from filters 2025-09-16 01:58:41 +02:00
Hobbabobba 400beafb29 example_config: Add pocket-id 2025-09-16 01:40:08 +02:00
dependabot[bot] 963e58bf1a build(deps): bump tracing-subscriber from 0.3.18 to 0.3.20
Bumps [tracing-subscriber](https://github.com/tokio-rs/tracing) from 0.3.18 to 0.3.20.
- [Release notes](https://github.com/tokio-rs/tracing/releases)
- [Commits](https://github.com/tokio-rs/tracing/compare/tracing-subscriber-0.3.18...tracing-subscriber-0.3.20)

---
updated-dependencies:
- dependency-name: tracing-subscriber
  dependency-version: 0.3.20
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-09-16 01:10:06 +02:00
Kumpelinus 176c49c78d chore: upgrade Rust toolchain to 1.89 and modernize code with let-chains 2025-09-16 00:48:16 +02:00
Copilot 3d5542996f chore: Add CodeRabbit configuration to reduce agent verbosity 2025-09-16 00:12:45 +02:00
psentee 4590463cdf auth: serialize exp and iat claims as NumericDate to comply with RFC7519 (#1289)
Add `jti` claim to the JWT to avoid hashing collisions
2025-09-15 17:24:59 +02:00
lordratner 85ce481e32 Update opnsense.md
Added instruction for using/not using Constraint Groups. This option is selected by default and the current instructions do not address it, but if it is left on and the Authentication Containers are not updated, the group sync will fail.
2025-09-14 15:53:05 +02:00
Valentin Tolmer f64f8625f1 Add username to password recovey emails 2025-09-14 15:44:37 +02:00
Alexandre Foley c68f9e7cab example_configs: fix the quadlet readme
Several "podman" command should have been "systemctl" from the start.
2025-09-04 22:23:12 +02:00
Copilot 775c5c716d server: gracefully shut down database connection pool 2025-09-04 09:19:03 +02:00
Kumpelinus 89cb59919b server: Add modifyTimestamp and pwdChangedTime attributes (#1265)
Add a modifyTimestamp attribute to LDAP entries for users and groups, and expose pwdChangedTime for users.
These attributes let clients track when an entry (or its password) was last changed.

 -  modifyTimestamp is a server-maintained attribute that updates on any write to user or group entries, including membership changes (on the group side).

 -  pwdChangedTime is set when a user’s password is created or changed.
2025-08-31 14:56:07 +02:00
103 changed files with 5973 additions and 3791 deletions
+46
View File
@@ -0,0 +1,46 @@
# docs: https://docs.coderabbit.ai/reference/yaml-template for full configuration options
tone_instructions: "Be concise"
reviews:
profile: "chill"
high_level_summary: false
review_status: false
commit_status: false
collapse_walkthrough: true
changed_files_summary: false
sequence_diagrams: false
estimate_code_review_effort: false
assess_linked_issues: false
related_issues: false
related_prs: false
suggested_labels: false
suggested_reviewers: false
poem: false
auto_review:
enabled: true
auto_incremental_review: true
finishing_touches:
docstrings:
enabled: false
unit_tests:
enabled: false
pre_merge_checks:
docstrings:
mode: "off"
title:
mode: "off"
description:
mode: "off"
issue_assessment:
mode: "off"
chat:
art: false
auto_reply: false
knowledge_base:
web_search:
enabled: true
code_guidelines:
enabled: false
+1 -1
View File
@@ -1,4 +1,4 @@
FROM rust:1.85
FROM rust:1.89
ARG USERNAME=lldapdev
# We need to keep the user as 1001 to match the GitHub runner's UID.
+20 -2
View File
@@ -1,8 +1,26 @@
{
"name": "LLDAP dev",
"build": { "dockerfile": "Dockerfile" },
"build": {
"dockerfile": "Dockerfile"
},
"customizations": {
"vscode": {
"extensions": [
"rust-lang.rust-analyzer"
],
"settings": {
"rust-analyzer.linkedProjects": [
"./Cargo.toml"
]
}
}
},
"features": {
"ghcr.io/devcontainers/features/rust:1": {}
},
"forwardPorts": [
3890,
17170
]
],
"remoteUser": "lldapdev"
}
+1
View File
@@ -0,0 +1 @@
use flake
+5 -8
View File
@@ -1,19 +1,16 @@
codecov:
require_ci_to_pass: yes
comment:
layout: "header,diff,files"
require_changes: true
require_base: true
require_head: true
layout: "condensed_header, diff, condensed_files"
hide_project_coverage: true
require_changes: "coverage_drop"
coverage:
range: "70...100"
status:
project:
default:
target: "75%"
threshold: "0.1%"
removed_code_behavior: adjust_base
github_checks:
annotations: true
threshold: 5
ignore:
- "app"
- "docs"
+1 -1
View File
@@ -1,5 +1,5 @@
# Keep tracking base image
FROM rust:1.85-slim-bookworm
FROM rust:1.89-slim-bookworm
# Set needed env path
ENV PATH="/opt/armv7l-linux-musleabihf-cross/:/opt/armv7l-linux-musleabihf-cross/bin/:/opt/aarch64-linux-musl-cross/:/opt/aarch64-linux-musl-cross/bin/:/opt/x86_64-linux-musl-cross/:/opt/x86_64-linux-musl-cross/bin/:$PATH"
+31 -21
View File
@@ -24,7 +24,7 @@ on:
env:
CARGO_TERM_COLOR: always
MSRV: "1.89.0"
### CI Docs
@@ -87,8 +87,14 @@ jobs:
image: lldap/rust-dev:latest
steps:
- name: Checkout repository
uses: actions/checkout@v5.0.0
- uses: actions/cache@v4
uses: actions/checkout@v6.0.2
- name: Install Rust
id: toolchain
uses: dtolnay/rust-toolchain@master
with:
toolchain: "${{ env.MSRV }}"
targets: "wasm32-unknown-unknown"
- uses: actions/cache@v5
with:
path: |
/usr/local/cargo/bin
@@ -99,8 +105,6 @@ jobs:
key: lldap-ui-${{ hashFiles('**/Cargo.lock') }}
restore-keys: |
lldap-ui-
- name: Add wasm target (rust)
run: rustup target add wasm32-unknown-unknown
- name: Install wasm-pack with cargo
run: cargo install wasm-pack || true
env:
@@ -110,7 +114,7 @@ jobs:
- name: Check build path
run: ls -al app/
- name: Upload ui artifacts
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v7
with:
name: ui
path: app/
@@ -132,8 +136,14 @@ jobs:
CARGO_HOME: ${GITHUB_WORKSPACE}/.cargo
steps:
- name: Checkout repository
uses: actions/checkout@v5.0.0
- uses: actions/cache@v4
uses: actions/checkout@v6.0.2
- name: Install Rust
id: toolchain
uses: dtolnay/rust-toolchain@master
with:
toolchain: "${{ env.MSRV }}"
targets: "${{ matrix.target }}"
- uses: actions/cache@v5
with:
path: |
.cargo/bin
@@ -149,17 +159,17 @@ jobs:
- name: Check path
run: ls -al target/release
- name: Upload ${{ matrix.target}} lldap artifacts
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v7
with:
name: ${{ matrix.target}}-lldap-bin
path: target/${{ matrix.target }}/release/lldap
- name: Upload ${{ matrix.target }} migration tool artifacts
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v7
with:
name: ${{ matrix.target }}-lldap_migration_tool-bin
path: target/${{ matrix.target }}/release/lldap_migration_tool
- name: Upload ${{ matrix.target }} password tool artifacts
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v7
with:
name: ${{ matrix.target }}-lldap_set_password-bin
path: target/${{ matrix.target }}/release/lldap_set_password
@@ -199,7 +209,7 @@ jobs:
steps:
- name: Download artifacts
uses: actions/download-artifact@v4
uses: actions/download-artifact@v8
with:
name: x86_64-unknown-linux-musl-lldap-bin
path: bin/
@@ -300,18 +310,18 @@ jobs:
steps:
- name: Checkout scripts
uses: actions/checkout@v5.0.0
uses: actions/checkout@v6.0.2
with:
sparse-checkout: 'scripts'
- name: Download LLDAP artifacts
uses: actions/download-artifact@v4
uses: actions/download-artifact@v8
with:
name: x86_64-unknown-linux-musl-lldap-bin
path: bin/
- name: Download LLDAP set password
uses: actions/download-artifact@v4
uses: actions/download-artifact@v8
with:
name: x86_64-unknown-linux-musl-lldap_set_password-bin
path: bin/
@@ -496,21 +506,21 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@v5.0.0
uses: actions/checkout@v6.0.2
- name: Download all artifacts
uses: actions/download-artifact@v4
uses: actions/download-artifact@v8
with:
path: bin
- name: Download llap ui artifacts
uses: actions/download-artifact@v4
uses: actions/download-artifact@v8
with:
name: ui
path: web
- name: Setup QEMU
uses: docker/setup-qemu-action@v3
uses: docker/setup-qemu-action@v4
- name: Setup buildx
uses: docker/setup-buildx-action@v3
with:
@@ -681,7 +691,7 @@ jobs:
contents: write
steps:
- name: Download all artifacts
uses: actions/download-artifact@v4
uses: actions/download-artifact@v8
with:
path: bin/
- name: Check file
@@ -702,7 +712,7 @@ jobs:
chmod +x bin/*-lldap_set_password
- name: Download llap ui artifacts
uses: actions/download-artifact@v4
uses: actions/download-artifact@v8
with:
name: ui
path: web
+6 -6
View File
@@ -8,7 +8,7 @@ on:
env:
CARGO_TERM_COLOR: always
MSRV: 1.85.0
MSRV: "1.89.0"
jobs:
pre_job:
@@ -34,7 +34,7 @@ jobs:
steps:
- name: Checkout sources
uses: actions/checkout@v5.0.0
uses: actions/checkout@v6.0.2
- name: Install Rust
id: toolchain
uses: dtolnay/rust-toolchain@master
@@ -42,7 +42,7 @@ jobs:
toolchain: "${{ env.MSRV }}"
- uses: Swatinem/rust-cache@v2
- name: Build
run: cargo build --verbose --workspace
run: cargo +${{steps.toolchain.outputs.name}} build --verbose --workspace
- name: Run tests
run: cargo +${{steps.toolchain.outputs.name}} test --verbose --workspace
- name: Generate GraphQL schema
@@ -58,7 +58,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout sources
uses: actions/checkout@v5.0.0
uses: actions/checkout@v6.0.2
- name: Install Rust
id: toolchain
uses: dtolnay/rust-toolchain@master
@@ -75,7 +75,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout sources
uses: actions/checkout@v5.0.0
uses: actions/checkout@v6.0.2
- name: Install Rust
id: toolchain
uses: dtolnay/rust-toolchain@master
@@ -94,7 +94,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout sources
uses: actions/checkout@v5.0.0
uses: actions/checkout@v6.0.2
- name: Install Rust
run: rustup toolchain install nightly --component llvm-tools-preview && rustup component add llvm-tools-preview --toolchain stable-x86_64-unknown-linux-gnu
+5
View File
@@ -29,3 +29,8 @@ recipe.json
lldap_config.toml
cert.pem
key.pem
# Nix
result
result-*
.direnv
+3 -1
View File
@@ -46,7 +46,9 @@ advanced guides (scripting, migrations, ...) you can contribute to.
### Code
If you don't know what to start with, check out the
[good first issues](https://github.com/lldap/lldap/labels/good%20first%20issue).
[good first issues](https://github.com/lldap/lldap/labels/good%20first%20issue).
For an alternative development environment setup, see [docs/nix-development.md](docs/nix-development.md).
Otherwise, if you want to fix a specific bug or implement a feature, make sure
to start by creating an issue for it (if it doesn't already exist). There, we
Generated
+1611 -850
View File
File diff suppressed because it is too large Load Diff
+1 -3
View File
@@ -16,6 +16,7 @@ edition = "2024"
homepage = "https://github.com/lldap/lldap"
license = "GPL-3.0-only"
repository = "https://github.com/lldap/lldap"
rust-version = "1.89.0"
[profile.release]
lto = true
@@ -23,9 +24,6 @@ lto = true
[profile.release.package.lldap_app]
opt-level = 's'
[patch.crates-io.lber]
git = 'https://github.com/inejge/ldap3/'
[workspace.dependencies.sea-orm]
version = "1.1.8"
default-features = false
+2 -2
View File
@@ -83,7 +83,7 @@ MySQL/MariaDB or PostgreSQL.
## Installation
It's possible to install lldap from OCI images ([docker](docs/install.md#with-docker)/[podman](docs/install.md#with-podman)), from [Kubernetes](docs/install.md#with-kubernetes), or from [a regular distribution package manager](docs/install.md/#from-a-package-repository) (Archlinux, Debian, CentOS, Fedora, OpenSuse, Ubuntu, FreeBSD).
It's possible to install lldap from OCI images ([docker](docs/install.md#with-docker)/[podman](docs/install.md#with-podman)), from [Kubernetes](docs/install.md#with-kubernetes), [TrueNAS](docs/install.md#truenas-scale), or from [a regular distribution package manager](docs/install.md/#from-a-package-repository) (Archlinux, Debian, CentOS, Fedora, OpenSuse, Ubuntu, FreeBSD).
Building [from source](docs/install.md#from-source) and [cross-compiling](docs/install.md#cross-compilation) to a different hardware architecture is also supported.
@@ -145,7 +145,7 @@ the relevant details (logs of the service, LLDAP logs with `verbose=true` in
the config).
Some specific clients have been tested to work and come with sample
configuration files, or guides. See the [`example_configs`](example_configs)
configuration files, or guides. See the [`example_configs`](example_configs/README.md)
folder for example configs for integration with specific services.
Integration with Linux accounts is possible, through PAM and nslcd. See [PAM
+1
View File
@@ -8,6 +8,7 @@ authors.workspace = true
homepage.workspace = true
license.workspace = true
repository.workspace = true
rust-version.workspace = true
[dependencies]
anyhow = "1"
+18 -16
View File
@@ -197,17 +197,19 @@ impl App {
<CreateUserForm/>
},
AppRoute::Index | AppRoute::ListUsers => {
let user_button = html! {
<Link classes="btn btn-primary" to={AppRoute::CreateUser}>
<i class="bi-person-plus me-2"></i>
{"Create a user"}
</Link>
let user_button = |key| {
html! {
<Link classes="btn btn-primary" key={key} to={AppRoute::CreateUser}>
<i class="bi-person-plus me-2"></i>
{"Create a user"}
</Link>
}
};
html! {
<div>
{ user_button.clone() }
{ user_button("top-create-user") }
<UserTable />
{ user_button }
{ user_button("bottom-create-user") }
</div>
}
}
@@ -221,19 +223,19 @@ impl App {
<CreateGroupAttributeForm/>
},
AppRoute::ListGroups => {
let group_button = html! {
<Link classes="btn btn-primary" to={AppRoute::CreateGroup}>
<i class="bi-plus-circle me-2"></i>
{"Create a group"}
</Link>
let group_button = |key| {
html! {
<Link classes="btn btn-primary" key={key} to={AppRoute::CreateGroup}>
<i class="bi-plus-circle me-2"></i>
{"Create a group"}
</Link>
}
};
// Note: There's a weird bug when switching from the users page to the groups page
// where the two groups buttons are at the bottom. I don't know why.
html! {
<div>
{ group_button.clone() }
{ group_button("top-create-group") }
<GroupTable />
{ group_button }
{ group_button("bottom-create-group") }
</div>
}
}
+4
View File
@@ -304,11 +304,14 @@ impl Component for CreateUserForm {
}
fn get_custom_attribute_input(attribute_schema: &Attribute) -> Html {
let mail_is_required = attribute_schema.name.as_str() == "mail";
if attribute_schema.is_list {
html! {
<ListAttributeInput
name={attribute_schema.name.clone()}
attribute_type={attribute_schema.attribute_type}
required={mail_is_required}
/>
}
} else {
@@ -316,6 +319,7 @@ fn get_custom_attribute_input(attribute_schema: &Attribute) -> Html {
<SingleAttributeInput
name={attribute_schema.name.clone()}
attribute_type={attribute_schema.attribute_type}
required={mail_is_required}
/>
}
}
+11 -3
View File
@@ -45,6 +45,8 @@ fn attribute_input(props: &AttributeInputProps) -> Html {
#[derive(Properties, PartialEq)]
struct AttributeLabelProps {
pub name: String,
#[prop_or(false)]
pub required: bool,
}
#[function_component(AttributeLabel)]
fn attribute_label(props: &AttributeLabelProps) -> Html {
@@ -66,7 +68,9 @@ fn attribute_label(props: &AttributeLabelProps) -> Html {
<label for={props.name.clone()}
class="form-label col-4 col-form-label"
>
{props.name[0..1].to_uppercase() + &props.name[1..].replace('_', " ")}{":"}
{props.name[0..1].to_uppercase() + &props.name[1..].replace('_', " ")}
{if props.required { html!{<span class="text-danger">{"*"}</span>} } else { html!{} }}
{":"}
<button
class="btn btn-sm btn-link"
type="button"
@@ -85,13 +89,15 @@ pub struct SingleAttributeInputProps {
pub(crate) attribute_type: AttributeType,
#[prop_or(None)]
pub value: Option<String>,
#[prop_or(false)]
pub required: bool,
}
#[function_component(SingleAttributeInput)]
pub fn single_attribute_input(props: &SingleAttributeInputProps) -> Html {
html! {
<div class="row mb-3">
<AttributeLabel name={props.name.clone()} />
<AttributeLabel name={props.name.clone()} required={props.required} />
<div class="col-8">
<AttributeInput
attribute_type={props.attribute_type}
@@ -108,6 +114,8 @@ pub struct ListAttributeInputProps {
pub(crate) attribute_type: AttributeType,
#[prop_or(vec!())]
pub values: Vec<String>,
#[prop_or(false)]
pub required: bool,
}
pub enum ListAttributeInputMsg {
@@ -160,7 +168,7 @@ impl Component for ListAttributeInput {
let link = &ctx.link();
html! {
<div class="row mb-3">
<AttributeLabel name={props.name.clone()} />
<AttributeLabel name={props.name.clone()} required={props.required} />
<div class="col-8">
{self.indices.iter().map(|&i| html! {
<div class="input-group mb-2" key={i}>
+12 -14
View File
@@ -147,20 +147,18 @@ impl Component for JpegFileInput {
true
}
Msg::FileLoaded(file_name, data) => {
if let Some(avatar) = &mut self.avatar {
if let Some(file) = &avatar.file {
if file.name() == file_name {
if let Result::Ok(data) = data {
if !is_valid_jpeg(data.as_slice()) {
// Clear the selection.
self.avatar = Some(JsFile::default());
// TODO: bail!("Chosen image is not a valid JPEG");
} else {
avatar.contents = Some(data);
return true;
}
}
}
if let Some(avatar) = &mut self.avatar
&& let Some(file) = &avatar.file
&& file.name() == file_name
&& let Result::Ok(data) = data
{
if !is_valid_jpeg(data.as_slice()) {
// Clear the selection.
self.avatar = Some(JsFile::default());
// TODO: bail!("Chosen image is not a valid JPEG");
} else {
avatar.contents = Some(data);
return true;
}
}
self.reader = None;
+1 -1
View File
@@ -27,7 +27,7 @@ pub struct LoginForm {
pub struct FormModel {
#[validate(length(min = 1, message = "Missing username"))]
username: String,
#[validate(length(min = 8, message = "Invalid password. Min length: 8"))]
#[validate(length(min = 1, message = "Missing password"))]
password: String,
}
+25 -6
View File
@@ -8,12 +8,17 @@ pub mod group {
use super::AttributeDescription;
pub fn resolve_group_attribute_description(name: &str) -> Option<AttributeDescription> {
pub fn resolve_group_attribute_description(name: &'_ str) -> Option<AttributeDescription<'_>> {
match name {
"creation_date" => Some(AttributeDescription {
attribute_identifier: name,
attribute_name: "creationdate",
aliases: vec![name, "createtimestamp", "modifytimestamp"],
aliases: vec![name, "createtimestamp"],
}),
"modified_date" => Some(AttributeDescription {
attribute_identifier: name,
attribute_name: "modifydate",
aliases: vec![name, "modifytimestamp"],
}),
"display_name" => Some(AttributeDescription {
attribute_identifier: name,
@@ -34,7 +39,9 @@ pub mod group {
}
}
pub fn resolve_group_attribute_description_or_default(name: &str) -> AttributeDescription {
pub fn resolve_group_attribute_description_or_default(
name: &'_ str,
) -> AttributeDescription<'_> {
match resolve_group_attribute_description(name) {
Some(d) => d,
None => AttributeDescription {
@@ -50,7 +57,7 @@ pub mod user {
use super::AttributeDescription;
pub fn resolve_user_attribute_description(name: &str) -> Option<AttributeDescription> {
pub fn resolve_user_attribute_description(name: &'_ str) -> Option<AttributeDescription<'_>> {
match name {
"avatar" => Some(AttributeDescription {
attribute_identifier: name,
@@ -60,7 +67,17 @@ pub mod user {
"creation_date" => Some(AttributeDescription {
attribute_identifier: name,
attribute_name: "creationdate",
aliases: vec![name, "createtimestamp", "modifytimestamp"],
aliases: vec![name, "createtimestamp"],
}),
"modified_date" => Some(AttributeDescription {
attribute_identifier: name,
attribute_name: "modifydate",
aliases: vec![name, "modifytimestamp"],
}),
"password_modified_date" => Some(AttributeDescription {
attribute_identifier: name,
attribute_name: "passwordmodifydate",
aliases: vec![name, "pwdchangedtime"],
}),
"display_name" => Some(AttributeDescription {
attribute_identifier: name,
@@ -96,7 +113,9 @@ pub mod user {
}
}
pub fn resolve_user_attribute_description_or_default(name: &str) -> AttributeDescription {
pub fn resolve_user_attribute_description_or_default(
name: &'_ str,
) -> AttributeDescription<'_> {
match resolve_user_attribute_description(name) {
Some(d) => d,
None => AttributeDescription {
+1
View File
@@ -2,6 +2,7 @@
#![forbid(non_ascii_idents)]
#![allow(clippy::uninlined_format_args)]
#![allow(clippy::let_unit_value)]
#![allow(clippy::unnecessary_operation)] // Doesn't work well with the html macro.
pub mod components;
pub mod infra;
+1
View File
@@ -7,6 +7,7 @@ edition.workspace = true
homepage.workspace = true
license.workspace = true
repository.workspace = true
rust-version.workspace = true
[dependencies]
tracing = "*"
+2
View File
@@ -7,6 +7,7 @@ authors.workspace = true
homepage.workspace = true
license.workspace = true
repository.workspace = true
rust-version.workspace = true
[features]
default = ["opaque_server", "opaque_client"]
@@ -24,6 +25,7 @@ generic-array = "0.14"
rand = "0.8"
sha2 = "0.9"
thiserror = "2"
uuid = { version = "1.18.1", features = ["serde"] }
[dependencies.derive_more]
features = ["debug", "display"]
+4
View File
@@ -4,6 +4,7 @@ use chrono::prelude::*;
use serde::{Deserialize, Serialize};
use std::collections::HashSet;
use std::fmt;
use uuid::Uuid;
pub mod access_control;
pub mod opaque;
@@ -208,8 +209,11 @@ pub mod types {
#[derive(Clone, Serialize, Deserialize)]
pub struct JWTClaims {
#[serde(with = "chrono::serde::ts_seconds")]
pub exp: DateTime<Utc>,
#[serde(with = "chrono::serde::ts_seconds")]
pub iat: DateTime<Utc>,
pub jti: Uuid,
pub user: String,
pub groups: HashSet<String>,
}
+1
View File
@@ -6,6 +6,7 @@ authors.workspace = true
homepage.workspace = true
license.workspace = true
repository.workspace = true
rust-version.workspace = true
[features]
test = []
+1
View File
@@ -6,6 +6,7 @@ authors.workspace = true
homepage.workspace = true
license.workspace = true
repository.workspace = true
rust-version.workspace = true
[features]
test = []
+3
View File
@@ -14,6 +14,7 @@ pub struct Model {
pub lowercase_display_name: String,
pub creation_date: chrono::NaiveDateTime,
pub uuid: Uuid,
pub modified_date: chrono::NaiveDateTime,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
@@ -39,6 +40,7 @@ impl From<Model> for lldap_domain::types::Group {
uuid: group.uuid,
users: vec![],
attributes: Vec::new(),
modified_date: group.modified_date,
}
}
}
@@ -51,6 +53,7 @@ impl From<Model> for lldap_domain::types::GroupDetails {
creation_date: group.creation_date,
uuid: group.uuid,
attributes: Vec::new(),
modified_date: group.modified_date,
}
}
}
+8
View File
@@ -21,6 +21,8 @@ pub struct Model {
pub totp_secret: Option<String>,
pub mfa_type: Option<String>,
pub uuid: Uuid,
pub modified_date: chrono::NaiveDateTime,
pub password_modified_date: chrono::NaiveDateTime,
}
impl EntityName for Entity {
@@ -40,6 +42,8 @@ pub enum Column {
TotpSecret,
MfaType,
Uuid,
ModifiedDate,
PasswordModifiedDate,
}
impl ColumnTrait for Column {
@@ -56,6 +60,8 @@ impl ColumnTrait for Column {
Column::TotpSecret => ColumnType::String(StringLen::N(64)),
Column::MfaType => ColumnType::String(StringLen::N(64)),
Column::Uuid => ColumnType::String(StringLen::N(36)),
Column::ModifiedDate => ColumnType::DateTime,
Column::PasswordModifiedDate => ColumnType::DateTime,
}
.def()
}
@@ -121,6 +127,8 @@ impl From<Model> for lldap_domain::types::User {
creation_date: user.creation_date,
uuid: user.uuid,
attributes: Vec::new(),
modified_date: user.modified_date,
password_modified_date: user.password_modified_date,
}
}
}
+1
View File
@@ -9,6 +9,7 @@ edition.workspace = true
homepage.workspace = true
license.workspace = true
repository.workspace = true
rust-version.workspace = true
[features]
test = []
+27
View File
@@ -34,6 +34,24 @@ impl From<Schema> for PublicSchema {
is_hardcoded: true,
is_readonly: true,
},
AttributeSchema {
name: "modified_date".into(),
attribute_type: AttributeType::DateTime,
is_list: false,
is_visible: true,
is_editable: false,
is_hardcoded: true,
is_readonly: true,
},
AttributeSchema {
name: "password_modified_date".into(),
attribute_type: AttributeType::DateTime,
is_list: false,
is_visible: true,
is_editable: false,
is_hardcoded: true,
is_readonly: true,
},
AttributeSchema {
name: "mail".into(),
attribute_type: AttributeType::String,
@@ -85,6 +103,15 @@ impl From<Schema> for PublicSchema {
is_hardcoded: true,
is_readonly: true,
},
AttributeSchema {
name: "modified_date".into(),
attribute_type: AttributeType::DateTime,
is_list: false,
is_visible: true,
is_editable: false,
is_hardcoded: true,
is_readonly: true,
},
AttributeSchema {
name: "uuid".into(),
attribute_type: AttributeType::String,
+8 -8
View File
@@ -7,8 +7,8 @@ use sea_orm::{
DbErr, DeriveValueType, QueryResult, TryFromU64, TryGetError, TryGetable, Value,
entity::IntoActiveValue,
sea_query::{
ArrayType, ColumnType, Nullable, SeaRc, StringLen, ValueTypeErr,
extension::mysql::MySqlType, value::ValueType,
ArrayType, ColumnType, SeaRc, StringLen, ValueTypeErr, extension::mysql::MySqlType,
value::ValueType,
},
};
use serde::{Deserialize, Serialize};
@@ -415,12 +415,6 @@ impl JpegPhoto {
}
}
impl Nullable for JpegPhoto {
fn null() -> Value {
JpegPhoto::null().into()
}
}
impl IntoActiveValue<Serialized> for JpegPhoto {
fn into_active_value(self) -> sea_orm::ActiveValue<Serialized> {
if self.is_empty() {
@@ -546,6 +540,8 @@ pub struct User {
pub creation_date: NaiveDateTime,
pub uuid: Uuid,
pub attributes: Vec<Attribute>,
pub modified_date: NaiveDateTime,
pub password_modified_date: NaiveDateTime,
}
#[cfg(feature = "test")]
@@ -559,6 +555,8 @@ impl Default for User {
creation_date: epoch,
uuid: Uuid::from_name_and_date("", &epoch),
attributes: Vec::new(),
modified_date: epoch,
password_modified_date: epoch,
}
}
}
@@ -654,6 +652,7 @@ pub struct Group {
pub uuid: Uuid,
pub users: Vec<UserId>,
pub attributes: Vec<Attribute>,
pub modified_date: NaiveDateTime,
}
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
@@ -663,6 +662,7 @@ pub struct GroupDetails {
pub creation_date: NaiveDateTime,
pub uuid: Uuid,
pub attributes: Vec<Attribute>,
pub modified_date: NaiveDateTime,
}
#[derive(Debug, Clone, PartialEq, Eq)]
+1
View File
@@ -7,6 +7,7 @@ edition.workspace = true
homepage.workspace = true
license.workspace = true
repository.workspace = true
rust-version.workspace = true
[dependencies.serde]
workspace = true
+1
View File
@@ -7,6 +7,7 @@ authors.workspace = true
homepage.workspace = true
license.workspace = true
repository.workspace = true
rust-version.workspace = true
[dependencies]
anyhow = "*"
@@ -0,0 +1,160 @@
use anyhow::{Context as AnyhowContext, anyhow};
use juniper::FieldResult;
use lldap_access_control::{AdminBackendHandler, ReadonlyBackendHandler};
use lldap_domain::{
deserialize::deserialize_attribute_value,
public_schema::PublicSchema,
requests::CreateGroupRequest,
schema::AttributeList,
types::{Attribute as DomainAttribute, AttributeName, Email},
};
use lldap_domain_handlers::handler::{BackendHandler, ReadSchemaBackendHandler};
use std::{collections::BTreeMap, sync::Arc};
use tracing::{Instrument, Span};
use super::inputs::AttributeValue;
use crate::api::{Context, field_error_callback};
pub struct UnpackedAttributes {
pub email: Option<Email>,
pub display_name: Option<String>,
pub attributes: Vec<DomainAttribute>,
}
pub fn unpack_attributes(
attributes: Vec<AttributeValue>,
schema: &PublicSchema,
is_admin: bool,
) -> FieldResult<UnpackedAttributes> {
let email = attributes
.iter()
.find(|attr| attr.name == "mail")
.cloned()
.map(|attr| deserialize_attribute(&schema.get_schema().user_attributes, attr, is_admin))
.transpose()?
.map(|attr| attr.value.into_string().unwrap())
.map(Email::from);
let display_name = attributes
.iter()
.find(|attr| attr.name == "display_name")
.cloned()
.map(|attr| deserialize_attribute(&schema.get_schema().user_attributes, attr, is_admin))
.transpose()?
.map(|attr| attr.value.into_string().unwrap());
let attributes = attributes
.into_iter()
.filter(|attr| attr.name != "mail" && attr.name != "display_name")
.map(|attr| deserialize_attribute(&schema.get_schema().user_attributes, attr, is_admin))
.collect::<Result<Vec<_>, _>>()?;
Ok(UnpackedAttributes {
email,
display_name,
attributes,
})
}
/// Consolidates caller supplied user fields and attributes into a list of attributes.
///
/// A number of user fields are internally represented as attributes, but are still also
/// available as fields on user objects. This function consolidates these fields and the
/// given attributes into a resulting attribute list. If a value is supplied for both a
/// field and the corresponding attribute, the attribute will take precedence.
pub fn consolidate_attributes(
attributes: Vec<AttributeValue>,
first_name: Option<String>,
last_name: Option<String>,
avatar: Option<String>,
) -> Vec<AttributeValue> {
// Prepare map of the client provided attributes
let mut provided_attributes: BTreeMap<AttributeName, AttributeValue> = attributes
.into_iter()
.map(|x| {
(
x.name.clone().into(),
AttributeValue {
name: x.name.to_ascii_lowercase(),
value: x.value,
},
)
})
.collect::<BTreeMap<_, _>>();
// Prepare list of fallback attribute values
let field_attrs = [
("first_name", first_name),
("last_name", last_name),
("avatar", avatar),
];
for (name, value) in field_attrs.into_iter() {
if let Some(val) = value {
let attr_name: AttributeName = name.into();
provided_attributes
.entry(attr_name)
.or_insert_with(|| AttributeValue {
name: name.to_string(),
value: vec![val],
});
}
}
// Return the values of the resulting map
provided_attributes.into_values().collect()
}
pub async fn create_group_with_details<Handler: BackendHandler>(
context: &Context<Handler>,
request: super::inputs::CreateGroupInput,
span: Span,
) -> FieldResult<crate::query::Group<Handler>> {
let handler = context
.get_admin_handler()
.ok_or_else(field_error_callback(&span, "Unauthorized group creation"))?;
let schema = handler.get_schema().await?;
let public_schema: PublicSchema = schema.into();
let attributes = request
.attributes
.unwrap_or_default()
.into_iter()
.map(|attr| deserialize_attribute(&public_schema.get_schema().group_attributes, attr, true))
.collect::<Result<Vec<_>, _>>()?;
let request = CreateGroupRequest {
display_name: request.display_name.into(),
attributes,
};
let group_id = handler.create_group(request).await?;
let group_details = handler.get_group_details(group_id).instrument(span).await?;
crate::query::Group::<Handler>::from_group_details(group_details, Arc::new(public_schema))
}
pub fn deserialize_attribute(
attribute_schema: &AttributeList,
attribute: AttributeValue,
is_admin: bool,
) -> FieldResult<DomainAttribute> {
let attribute_name = AttributeName::from(attribute.name.as_str());
let attribute_schema = attribute_schema
.get_attribute_schema(&attribute_name)
.ok_or_else(|| anyhow!("Attribute {} is not defined in the schema", attribute.name))?;
if attribute_schema.is_readonly {
return Err(anyhow!(
"Permission denied: Attribute {} is read-only",
attribute.name
)
.into());
}
if !is_admin && !attribute_schema.is_editable {
return Err(anyhow!(
"Permission denied: Attribute {} is not editable by regular users",
attribute.name
)
.into());
}
let deserialized_values = deserialize_attribute_value(
&attribute.value,
attribute_schema.attribute_type,
attribute_schema.is_list,
)
.context(format!("While deserializing attribute {}", attribute.name))?;
Ok(DomainAttribute {
name: attribute_name,
value: deserialized_values,
})
}
@@ -0,0 +1,99 @@
use juniper::{GraphQLInputObject, GraphQLObject};
#[derive(Clone, PartialEq, Eq, Debug, GraphQLInputObject)]
// This conflicts with the attribute values returned by the user/group queries.
#[graphql(name = "AttributeValueInput")]
pub struct AttributeValue {
/// The name of the attribute. It must be present in the schema, and the type informs how
/// to interpret the values.
pub name: String,
/// The values of the attribute.
/// If the attribute is not a list, the vector must contain exactly one element.
/// Integers (signed 64 bits) are represented as strings.
/// Dates are represented as strings in RFC3339 format, e.g. "2019-10-12T07:20:50.52Z".
/// JpegPhotos are represented as base64 encoded strings. They must be valid JPEGs.
pub value: Vec<String>,
}
#[derive(PartialEq, Eq, Debug, GraphQLInputObject)]
/// The details required to create a user.
pub struct CreateUserInput {
pub id: String,
// The email can be specified as an attribute, but one of the two is required.
pub email: Option<String>,
pub display_name: Option<String>,
/// First name of user. Deprecated: use attribute instead.
/// If both field and corresponding attribute is supplied, the attribute will take precedence.
pub first_name: Option<String>,
/// Last name of user. Deprecated: use attribute instead.
/// If both field and corresponding attribute is supplied, the attribute will take precedence.
pub last_name: Option<String>,
/// Base64 encoded JpegPhoto. Deprecated: use attribute instead.
/// If both field and corresponding attribute is supplied, the attribute will take precedence.
pub avatar: Option<String>,
/// Attributes.
pub attributes: Option<Vec<AttributeValue>>,
}
#[derive(PartialEq, Eq, Debug, GraphQLInputObject)]
/// The details required to create a group.
pub struct CreateGroupInput {
pub display_name: String,
/// User-defined attributes.
pub attributes: Option<Vec<AttributeValue>>,
}
#[derive(PartialEq, Eq, Debug, GraphQLInputObject)]
/// The fields that can be updated for a user.
pub struct UpdateUserInput {
pub id: String,
pub email: Option<String>,
pub display_name: Option<String>,
/// First name of user. Deprecated: use attribute instead.
/// If both field and corresponding attribute is supplied, the attribute will take precedence.
pub first_name: Option<String>,
/// Last name of user. Deprecated: use attribute instead.
/// If both field and corresponding attribute is supplied, the attribute will take precedence.
pub last_name: Option<String>,
/// Base64 encoded JpegPhoto. Deprecated: use attribute instead.
/// If both field and corresponding attribute is supplied, the attribute will take precedence.
pub avatar: Option<String>,
/// Attribute names to remove.
/// They are processed before insertions.
pub remove_attributes: Option<Vec<String>>,
/// Inserts or updates the given attributes.
/// For lists, the entire list must be provided.
pub insert_attributes: Option<Vec<AttributeValue>>,
}
#[derive(PartialEq, Eq, Debug, GraphQLInputObject)]
/// The fields that can be updated for a group.
pub struct UpdateGroupInput {
/// The group ID.
pub id: i32,
/// The new display name.
pub display_name: Option<String>,
/// Attribute names to remove.
/// They are processed before insertions.
pub remove_attributes: Option<Vec<String>>,
/// Inserts or updates the given attributes.
/// For lists, the entire list must be provided.
pub insert_attributes: Option<Vec<AttributeValue>>,
}
#[derive(PartialEq, Eq, Debug, GraphQLObject)]
pub struct Success {
ok: bool,
}
impl Success {
pub fn new() -> Self {
Self { ok: true }
}
}
impl Default for Success {
fn default() -> Self {
Self::new()
}
}
@@ -1,27 +1,30 @@
pub mod helpers;
pub mod inputs;
// Re-export public types
pub use inputs::{
AttributeValue, CreateGroupInput, CreateUserInput, Success, UpdateGroupInput, UpdateUserInput,
};
use crate::api::{Context, field_error_callback};
use anyhow::{Context as AnyhowContext, anyhow};
use juniper::{FieldError, FieldResult, GraphQLInputObject, GraphQLObject, graphql_object};
use anyhow::anyhow;
use juniper::{FieldError, FieldResult, graphql_object};
use lldap_access_control::{
AdminBackendHandler, ReadonlyBackendHandler, UserReadableBackendHandler,
UserWriteableBackendHandler,
AdminBackendHandler, UserReadableBackendHandler, UserWriteableBackendHandler,
};
use lldap_domain::{
deserialize::deserialize_attribute_value,
public_schema::PublicSchema,
requests::{
CreateAttributeRequest, CreateGroupRequest, CreateUserRequest, UpdateGroupRequest,
UpdateUserRequest,
},
schema::AttributeList,
types::{
Attribute as DomainAttribute, AttributeName, AttributeType, Email, GroupId,
LdapObjectClass, UserId,
},
requests::{CreateAttributeRequest, CreateUserRequest, UpdateGroupRequest, UpdateUserRequest},
types::{AttributeName, AttributeType, Email, GroupId, LdapObjectClass, UserId},
};
use lldap_domain_handlers::handler::BackendHandler;
use lldap_validation::attributes::{ALLOWED_CHARACTERS_DESCRIPTION, validate_attribute_name};
use std::{collections::BTreeMap, sync::Arc};
use tracing::{Instrument, Span, debug, debug_span};
use std::sync::Arc;
use tracing::{Instrument, debug, debug_span};
use helpers::{
UnpackedAttributes, consolidate_attributes, create_group_with_details, deserialize_attribute,
unpack_attributes,
};
#[derive(PartialEq, Eq, Debug)]
/// The top-level GraphQL mutation type.
@@ -42,183 +45,6 @@ impl<Handler: BackendHandler> Mutation<Handler> {
}
}
}
#[derive(Clone, PartialEq, Eq, Debug, GraphQLInputObject)]
// This conflicts with the attribute values returned by the user/group queries.
#[graphql(name = "AttributeValueInput")]
struct AttributeValue {
/// The name of the attribute. It must be present in the schema, and the type informs how
/// to interpret the values.
name: String,
/// The values of the attribute.
/// If the attribute is not a list, the vector must contain exactly one element.
/// Integers (signed 64 bits) are represented as strings.
/// Dates are represented as strings in RFC3339 format, e.g. "2019-10-12T07:20:50.52Z".
/// JpegPhotos are represented as base64 encoded strings. They must be valid JPEGs.
value: Vec<String>,
}
#[derive(PartialEq, Eq, Debug, GraphQLInputObject)]
/// The details required to create a user.
pub struct CreateUserInput {
id: String,
// The email can be specified as an attribute, but one of the two is required.
email: Option<String>,
display_name: Option<String>,
/// First name of user. Deprecated: use attribute instead.
/// If both field and corresponding attribute is supplied, the attribute will take precedence.
first_name: Option<String>,
/// Last name of user. Deprecated: use attribute instead.
/// If both field and corresponding attribute is supplied, the attribute will take precedence.
last_name: Option<String>,
/// Base64 encoded JpegPhoto. Deprecated: use attribute instead.
/// If both field and corresponding attribute is supplied, the attribute will take precedence.
avatar: Option<String>,
/// Attributes.
attributes: Option<Vec<AttributeValue>>,
}
#[derive(PartialEq, Eq, Debug, GraphQLInputObject)]
/// The details required to create a group.
pub struct CreateGroupInput {
display_name: String,
/// User-defined attributes.
attributes: Option<Vec<AttributeValue>>,
}
#[derive(PartialEq, Eq, Debug, GraphQLInputObject)]
/// The fields that can be updated for a user.
pub struct UpdateUserInput {
id: String,
email: Option<String>,
display_name: Option<String>,
/// First name of user. Deprecated: use attribute instead.
/// If both field and corresponding attribute is supplied, the attribute will take precedence.
first_name: Option<String>,
/// Last name of user. Deprecated: use attribute instead.
/// If both field and corresponding attribute is supplied, the attribute will take precedence.
last_name: Option<String>,
/// Base64 encoded JpegPhoto. Deprecated: use attribute instead.
/// If both field and corresponding attribute is supplied, the attribute will take precedence.
avatar: Option<String>,
/// Attribute names to remove.
/// They are processed before insertions.
remove_attributes: Option<Vec<String>>,
/// Inserts or updates the given attributes.
/// For lists, the entire list must be provided.
insert_attributes: Option<Vec<AttributeValue>>,
}
#[derive(PartialEq, Eq, Debug, GraphQLInputObject)]
/// The fields that can be updated for a group.
pub struct UpdateGroupInput {
/// The group ID.
id: i32,
/// The new display name.
display_name: Option<String>,
/// Attribute names to remove.
/// They are processed before insertions.
remove_attributes: Option<Vec<String>>,
/// Inserts or updates the given attributes.
/// For lists, the entire list must be provided.
insert_attributes: Option<Vec<AttributeValue>>,
}
#[derive(PartialEq, Eq, Debug, GraphQLObject)]
pub struct Success {
ok: bool,
}
impl Success {
fn new() -> Self {
Self { ok: true }
}
}
struct UnpackedAttributes {
email: Option<Email>,
display_name: Option<String>,
attributes: Vec<DomainAttribute>,
}
fn unpack_attributes(
attributes: Vec<AttributeValue>,
schema: &PublicSchema,
is_admin: bool,
) -> FieldResult<UnpackedAttributes> {
let email = attributes
.iter()
.find(|attr| attr.name == "mail")
.cloned()
.map(|attr| deserialize_attribute(&schema.get_schema().user_attributes, attr, is_admin))
.transpose()?
.map(|attr| attr.value.into_string().unwrap())
.map(Email::from);
let display_name = attributes
.iter()
.find(|attr| attr.name == "display_name")
.cloned()
.map(|attr| deserialize_attribute(&schema.get_schema().user_attributes, attr, is_admin))
.transpose()?
.map(|attr| attr.value.into_string().unwrap());
let attributes = attributes
.into_iter()
.filter(|attr| attr.name != "mail" && attr.name != "display_name")
.map(|attr| deserialize_attribute(&schema.get_schema().user_attributes, attr, is_admin))
.collect::<Result<Vec<_>, _>>()?;
Ok(UnpackedAttributes {
email,
display_name,
attributes,
})
}
/// Consolidates caller supplied user fields and attributes into a list of attributes.
///
/// A number of user fields are internally represented as attributes, but are still also
/// available as fields on user objects. This function consolidates these fields and the
/// given attributes into a resulting attribute list. If a value is supplied for both a
/// field and the corresponding attribute, the attribute will take precedence.
fn consolidate_attributes(
attributes: Vec<AttributeValue>,
first_name: Option<String>,
last_name: Option<String>,
avatar: Option<String>,
) -> Vec<AttributeValue> {
// Prepare map of the client provided attributes
let mut provided_attributes: BTreeMap<AttributeName, AttributeValue> = attributes
.into_iter()
.map(|x| {
(
x.name.clone().into(),
AttributeValue {
name: x.name.to_ascii_lowercase(),
value: x.value,
},
)
})
.collect::<BTreeMap<_, _>>();
// Prepare list of fallback attribute values
let field_attrs = [
("first_name", first_name),
("last_name", last_name),
("avatar", avatar),
];
for (name, value) in field_attrs.into_iter() {
if let Some(val) = value {
let attr_name: AttributeName = name.into();
provided_attributes
.entry(attr_name)
.or_insert_with(|| AttributeValue {
name: name.to_string(),
value: vec![val],
});
}
}
// Return the values of the resulting map
provided_attributes.into_values().collect()
}
#[graphql_object(context = Context<Handler>)]
impl<Handler: BackendHandler> Mutation<Handler> {
async fn create_user(
@@ -721,66 +547,6 @@ impl<Handler: BackendHandler> Mutation<Handler> {
Ok(Success::new())
}
}
async fn create_group_with_details<Handler: BackendHandler>(
context: &Context<Handler>,
request: CreateGroupInput,
span: Span,
) -> FieldResult<super::query::Group<Handler>> {
let handler = context
.get_admin_handler()
.ok_or_else(field_error_callback(&span, "Unauthorized group creation"))?;
let schema = handler.get_schema().await?;
let attributes = request
.attributes
.unwrap_or_default()
.into_iter()
.map(|attr| deserialize_attribute(&schema.get_schema().group_attributes, attr, true))
.collect::<Result<Vec<_>, _>>()?;
let request = CreateGroupRequest {
display_name: request.display_name.into(),
attributes,
};
let group_id = handler.create_group(request).await?;
let group_details = handler.get_group_details(group_id).instrument(span).await?;
super::query::Group::<Handler>::from_group_details(group_details, Arc::new(schema))
}
fn deserialize_attribute(
attribute_schema: &AttributeList,
attribute: AttributeValue,
is_admin: bool,
) -> FieldResult<DomainAttribute> {
let attribute_name = AttributeName::from(attribute.name.as_str());
let attribute_schema = attribute_schema
.get_attribute_schema(&attribute_name)
.ok_or_else(|| anyhow!("Attribute {} is not defined in the schema", attribute.name))?;
if attribute_schema.is_readonly {
return Err(anyhow!(
"Permission denied: Attribute {} is read-only",
attribute.name
)
.into());
}
if !is_admin && !attribute_schema.is_editable {
return Err(anyhow!(
"Permission denied: Attribute {} is not editable by regular users",
attribute.name
)
.into());
}
let deserialized_values = deserialize_attribute_value(
&attribute.value,
attribute_schema.attribute_type,
attribute_schema.is_list,
)
.context(format!("While deserializing attribute {}", attribute.name))?;
Ok(DomainAttribute {
name: attribute_name,
value: deserialized_values,
})
}
#[cfg(test)]
mod tests {
use super::*;
File diff suppressed because it is too large Load Diff
@@ -0,0 +1,267 @@
use chrono::TimeZone;
use juniper::{FieldResult, graphql_object};
use lldap_domain::public_schema::PublicSchema;
use lldap_domain::schema::AttributeList as DomainAttributeList;
use lldap_domain::schema::AttributeSchema as DomainAttributeSchema;
use lldap_domain::types::{Attribute as DomainAttribute, AttributeValue as DomainAttributeValue};
use lldap_domain::types::{Cardinality, Group as DomainGroup, GroupDetails, User as DomainUser};
use lldap_domain_handlers::handler::BackendHandler;
use serde::{Deserialize, Serialize};
use crate::api::Context;
#[derive(PartialEq, Eq, Debug, Serialize, Deserialize)]
pub struct AttributeSchema<Handler: BackendHandler> {
schema: DomainAttributeSchema,
_phantom: std::marker::PhantomData<Box<Handler>>,
}
#[graphql_object(context = Context<Handler>)]
impl<Handler: BackendHandler> AttributeSchema<Handler> {
fn name(&self) -> String {
self.schema.name.to_string()
}
fn attribute_type(&self) -> lldap_domain::types::AttributeType {
self.schema.attribute_type
}
fn is_list(&self) -> bool {
self.schema.is_list
}
fn is_visible(&self) -> bool {
self.schema.is_visible
}
fn is_editable(&self) -> bool {
self.schema.is_editable
}
fn is_hardcoded(&self) -> bool {
self.schema.is_hardcoded
}
fn is_readonly(&self) -> bool {
self.schema.is_readonly
}
}
impl<Handler: BackendHandler> Clone for AttributeSchema<Handler> {
fn clone(&self) -> Self {
Self {
schema: self.schema.clone(),
_phantom: std::marker::PhantomData,
}
}
}
impl<Handler: BackendHandler> From<DomainAttributeSchema> for AttributeSchema<Handler> {
fn from(value: DomainAttributeSchema) -> Self {
Self {
schema: value,
_phantom: std::marker::PhantomData,
}
}
}
#[derive(PartialEq, Eq, Debug, Serialize, Deserialize)]
pub struct AttributeValue<Handler: BackendHandler> {
pub(super) attribute: DomainAttribute,
pub(super) schema: AttributeSchema<Handler>,
_phantom: std::marker::PhantomData<Box<Handler>>,
}
#[graphql_object(context = Context<Handler>)]
impl<Handler: BackendHandler> AttributeValue<Handler> {
fn name(&self) -> &str {
self.attribute.name.as_str()
}
fn value(&self) -> FieldResult<Vec<String>> {
Ok(serialize_attribute_to_graphql(&self.attribute.value))
}
fn schema(&self) -> &AttributeSchema<Handler> {
&self.schema
}
}
impl<Handler: BackendHandler> AttributeValue<Handler> {
fn from_value(attr: DomainAttribute, schema: DomainAttributeSchema) -> Self {
Self {
attribute: attr,
schema: AttributeSchema::<Handler> {
schema,
_phantom: std::marker::PhantomData,
},
_phantom: std::marker::PhantomData,
}
}
pub(super) fn name(&self) -> &str {
self.attribute.name.as_str()
}
}
impl<Handler: BackendHandler> Clone for AttributeValue<Handler> {
fn clone(&self) -> Self {
Self {
attribute: self.attribute.clone(),
schema: self.schema.clone(),
_phantom: std::marker::PhantomData,
}
}
}
pub fn serialize_attribute_to_graphql(attribute_value: &DomainAttributeValue) -> Vec<String> {
let convert_date = |&date| chrono::Utc.from_utc_datetime(&date).to_rfc3339();
match attribute_value {
DomainAttributeValue::String(Cardinality::Singleton(s)) => vec![s.clone()],
DomainAttributeValue::String(Cardinality::Unbounded(l)) => l.clone(),
DomainAttributeValue::Integer(Cardinality::Singleton(i)) => vec![i.to_string()],
DomainAttributeValue::Integer(Cardinality::Unbounded(l)) => {
l.iter().map(|i| i.to_string()).collect()
}
DomainAttributeValue::DateTime(Cardinality::Singleton(dt)) => vec![convert_date(dt)],
DomainAttributeValue::DateTime(Cardinality::Unbounded(l)) => {
l.iter().map(convert_date).collect()
}
DomainAttributeValue::JpegPhoto(Cardinality::Singleton(p)) => vec![String::from(p)],
DomainAttributeValue::JpegPhoto(Cardinality::Unbounded(l)) => {
l.iter().map(String::from).collect()
}
}
}
impl<Handler: BackendHandler> AttributeValue<Handler> {
    /// Pair `a` with its schema entry; `None` if the attribute is not declared
    /// in `schema`.
    fn from_schema(a: DomainAttribute, schema: &DomainAttributeList) -> Option<Self> {
        schema
            .get_attribute_schema(&a.name)
            .map(|s| AttributeValue::<Handler>::from_value(a, s.clone()))
    }
    /// Build the full attribute list for a user: hardcoded attributes are
    /// synthesized from the user's dedicated fields, then the stored (custom)
    /// attributes are appended. NOTE: this empties `user.attributes`.
    pub fn user_attributes_from_schema(
        user: &mut DomainUser,
        schema: &PublicSchema,
    ) -> Vec<AttributeValue<Handler>> {
        // Take ownership of the stored attributes, leaving the user's list empty.
        let user_attributes = std::mem::take(&mut user.attributes);
        let mut all_attributes = schema
            .get_schema()
            .user_attributes
            .attributes
            .iter()
            .filter(|a| a.is_hardcoded)
            .flat_map(|attribute_schema| {
                let value: Option<DomainAttributeValue> = match attribute_schema.name.as_str() {
                    "user_id" => Some(user.user_id.clone().into_string().into()),
                    "creation_date" => Some(user.creation_date.into()),
                    "modified_date" => Some(user.modified_date.into()),
                    "password_modified_date" => Some(user.password_modified_date.into()),
                    "mail" => Some(user.email.clone().into_string().into()),
                    "uuid" => Some(user.uuid.clone().into_string().into()),
                    "display_name" => user.display_name.as_ref().map(|d| d.clone().into()),
                    // No dedicated field; these are served from the stored
                    // attributes appended below.
                    "avatar" | "first_name" | "last_name" => None,
                    _ => panic!("Unexpected hardcoded attribute: {}", attribute_schema.name),
                };
                value.map(|v| (attribute_schema, v))
            })
            .map(|(attribute_schema, value)| {
                AttributeValue::<Handler>::from_value(
                    DomainAttribute {
                        name: attribute_schema.name.clone(),
                        value,
                    },
                    attribute_schema.clone(),
                )
            })
            .collect::<Vec<_>>();
        // Append the stored attributes; ones unknown to the schema are dropped.
        user_attributes
            .into_iter()
            .flat_map(|a| {
                AttributeValue::<Handler>::from_schema(a, &schema.get_schema().user_attributes)
            })
            .for_each(|value| all_attributes.push(value));
        all_attributes
    }
    /// Same as [`Self::user_attributes_from_schema`], for a full group.
    /// NOTE: this empties `group.attributes`.
    pub fn group_attributes_from_schema(
        group: &mut DomainGroup,
        schema: &PublicSchema,
    ) -> Vec<AttributeValue<Handler>> {
        let group_attributes = std::mem::take(&mut group.attributes);
        let mut all_attributes = schema
            .get_schema()
            .group_attributes
            .attributes
            .iter()
            .filter(|a| a.is_hardcoded)
            .map(|attribute_schema| {
                (
                    attribute_schema,
                    match attribute_schema.name.as_str() {
                        "group_id" => (group.id.0 as i64).into(),
                        "creation_date" => group.creation_date.into(),
                        "modified_date" => group.modified_date.into(),
                        "uuid" => group.uuid.clone().into_string().into(),
                        "display_name" => group.display_name.clone().into_string().into(),
                        _ => panic!("Unexpected hardcoded attribute: {}", attribute_schema.name),
                    },
                )
            })
            .map(|(attribute_schema, value)| {
                AttributeValue::<Handler>::from_value(
                    DomainAttribute {
                        name: attribute_schema.name.clone(),
                        value,
                    },
                    attribute_schema.clone(),
                )
            })
            .collect::<Vec<_>>();
        group_attributes
            .into_iter()
            .flat_map(|a| {
                AttributeValue::<Handler>::from_schema(a, &schema.get_schema().group_attributes)
            })
            .for_each(|value| all_attributes.push(value));
        all_attributes
    }
    /// Same as [`Self::group_attributes_from_schema`], for a `GroupDetails`
    /// (which stores the id as `group_id` instead of `id`).
    /// NOTE: this empties `group.attributes`.
    pub fn group_details_attributes_from_schema(
        group: &mut GroupDetails,
        schema: &PublicSchema,
    ) -> Vec<AttributeValue<Handler>> {
        let group_attributes = std::mem::take(&mut group.attributes);
        let mut all_attributes = schema
            .get_schema()
            .group_attributes
            .attributes
            .iter()
            .filter(|a| a.is_hardcoded)
            .map(|attribute_schema| {
                (
                    attribute_schema,
                    match attribute_schema.name.as_str() {
                        "group_id" => (group.group_id.0 as i64).into(),
                        "creation_date" => group.creation_date.into(),
                        "modified_date" => group.modified_date.into(),
                        "uuid" => group.uuid.clone().into_string().into(),
                        "display_name" => group.display_name.clone().into_string().into(),
                        _ => panic!("Unexpected hardcoded attribute: {}", attribute_schema.name),
                    },
                )
            })
            .map(|(attribute_schema, value)| {
                AttributeValue::<Handler>::from_value(
                    DomainAttribute {
                        name: attribute_schema.name.clone(),
                        value,
                    },
                    attribute_schema.clone(),
                )
            })
            .collect::<Vec<_>>();
        group_attributes
            .into_iter()
            .flat_map(|a| {
                AttributeValue::<Handler>::from_schema(a, &schema.get_schema().group_attributes)
            })
            .for_each(|value| all_attributes.push(value));
        all_attributes
    }
}
@@ -0,0 +1,89 @@
use anyhow::Context as AnyhowContext;
use juniper::{FieldResult, GraphQLInputObject};
use lldap_domain::deserialize::deserialize_attribute_value;
use lldap_domain::public_schema::PublicSchema;
use lldap_domain::types::GroupId;
use lldap_domain::types::UserId;
use lldap_domain_handlers::handler::UserRequestFilter as DomainRequestFilter;
use lldap_domain_model::model::UserColumn;
use lldap_ldap::{UserFieldType, map_user_field};
#[derive(PartialEq, Eq, Debug, GraphQLInputObject)]
/// A filter for requests, specifying a boolean expression based on field constraints. Only one of
/// the fields can be set at a time.
pub struct RequestFilter {
    /// Disjunction: true if any sub-filter matches.
    any: Option<Vec<RequestFilter>>,
    /// Conjunction: true if all sub-filters match.
    all: Option<Vec<RequestFilter>>,
    /// Negation of the sub-filter.
    not: Option<Box<RequestFilter>>,
    /// Equality constraint on a single field.
    eq: Option<EqualityConstraint>,
    /// Membership in the group with this name.
    member_of: Option<String>,
    /// Membership in the group with this id.
    member_of_id: Option<i32>,
}
impl RequestFilter {
    /// Convert this GraphQL filter into a domain-layer request filter.
    ///
    /// Exactly one of the fields must be set; errors if none or several are.
    /// `schema` is needed to resolve attribute names in `eq` constraints.
    pub fn try_into_domain_filter(self, schema: &PublicSchema) -> FieldResult<DomainRequestFilter> {
        // Match on all six options at once so the "exactly one set" rule is
        // enforced exhaustively.
        match (
            self.eq,
            self.any,
            self.all,
            self.not,
            self.member_of,
            self.member_of_id,
        ) {
            (Some(eq), None, None, None, None, None) => {
                // Map the field name to either a primary column or a custom attribute.
                match map_user_field(&eq.field.as_str().into(), schema) {
                    UserFieldType::NoMatch => {
                        Err(format!("Unknown request filter: {}", &eq.field).into())
                    }
                    UserFieldType::PrimaryField(UserColumn::UserId) => {
                        Ok(DomainRequestFilter::UserId(UserId::new(&eq.value)))
                    }
                    UserFieldType::PrimaryField(column) => {
                        Ok(DomainRequestFilter::Equality(column, eq.value))
                    }
                    // Non-list custom attribute: parse the value to the attribute's type.
                    UserFieldType::Attribute(name, typ, false) => {
                        let value = deserialize_attribute_value(&[eq.value], typ, false)
                            .context(format!("While deserializing attribute {}", &name))?;
                        Ok(DomainRequestFilter::AttributeEquality(name, value))
                    }
                    UserFieldType::Attribute(_, _, true) => {
                        Err("Equality not supported for list fields".into())
                    }
                    UserFieldType::MemberOf => Ok(DomainRequestFilter::MemberOf(eq.value.into())),
                    UserFieldType::ObjectClass | UserFieldType::Dn | UserFieldType::EntryDn => {
                        Err("Ldap fields not supported in request filter".into())
                    }
                }
            }
            (None, Some(any), None, None, None, None) => Ok(DomainRequestFilter::Or(
                any.into_iter()
                    .map(|f| f.try_into_domain_filter(schema))
                    .collect::<FieldResult<Vec<_>>>()?,
            )),
            (None, None, Some(all), None, None, None) => Ok(DomainRequestFilter::And(
                all.into_iter()
                    .map(|f| f.try_into_domain_filter(schema))
                    .collect::<FieldResult<Vec<_>>>()?,
            )),
            (None, None, None, Some(not), None, None) => Ok(DomainRequestFilter::Not(Box::new(
                (*not).try_into_domain_filter(schema)?,
            ))),
            (None, None, None, None, Some(group), None) => {
                Ok(DomainRequestFilter::MemberOf(group.into()))
            }
            (None, None, None, None, None, Some(group_id)) => {
                Ok(DomainRequestFilter::MemberOfId(GroupId(group_id)))
            }
            (None, None, None, None, None, None) => {
                Err("No field specified in request filter".into())
            }
            _ => Err("Multiple fields specified in request filter".into()),
        }
    }
}
#[derive(PartialEq, Eq, Debug, GraphQLInputObject)]
/// An equality constraint on a single user field.
pub struct EqualityConstraint {
    /// The name of the field to compare.
    field: String,
    /// The value it must be equal to.
    value: String,
}
+123
View File
@@ -0,0 +1,123 @@
use chrono::TimeZone;
use juniper::{FieldResult, graphql_object};
use lldap_access_control::ReadonlyBackendHandler;
use lldap_domain::public_schema::PublicSchema;
use lldap_domain::types::{Group as DomainGroup, GroupDetails, GroupId};
use lldap_domain_handlers::handler::{BackendHandler, UserRequestFilter as DomainRequestFilter};
use serde::{Deserialize, Serialize};
use std::sync::Arc;
use tracing::{Instrument, debug, debug_span};
use super::attribute::AttributeValue;
use super::user::User;
use crate::api::{Context, field_error_callback};
#[derive(PartialEq, Eq, Debug, Serialize, Deserialize)]
/// Represents a single group.
pub struct Group<Handler: BackendHandler> {
    pub group_id: i32,
    pub display_name: String,
    creation_date: chrono::NaiveDateTime,
    uuid: String,
    // Attributes resolved against the schema (hardcoded + custom).
    attributes: Vec<AttributeValue<Handler>>,
    // Shared schema, needed to resolve users lazily in the `users` field.
    pub schema: Arc<PublicSchema>,
    _phantom: std::marker::PhantomData<Box<Handler>>,
}
impl<Handler: BackendHandler> Group<Handler> {
    /// Build a GraphQL group from a full domain group.
    /// Consumes `group.attributes` (taken by the attribute builder).
    pub fn from_group(
        mut group: DomainGroup,
        schema: Arc<PublicSchema>,
    ) -> FieldResult<Group<Handler>> {
        let attributes =
            AttributeValue::<Handler>::group_attributes_from_schema(&mut group, &schema);
        Ok(Self {
            group_id: group.id.0,
            display_name: group.display_name.to_string(),
            creation_date: group.creation_date,
            uuid: group.uuid.into_string(),
            attributes,
            schema,
            _phantom: std::marker::PhantomData,
        })
    }
    /// Build a GraphQL group from a `GroupDetails` (no member list).
    /// Consumes `group_details.attributes`.
    pub fn from_group_details(
        mut group_details: GroupDetails,
        schema: Arc<PublicSchema>,
    ) -> FieldResult<Group<Handler>> {
        let attributes = AttributeValue::<Handler>::group_details_attributes_from_schema(
            &mut group_details,
            &schema,
        );
        Ok(Self {
            group_id: group_details.group_id.0,
            display_name: group_details.display_name.to_string(),
            creation_date: group_details.creation_date,
            uuid: group_details.uuid.into_string(),
            attributes,
            schema,
            _phantom: std::marker::PhantomData,
        })
    }
}
// Manual impl: `#[derive(Clone)]` would wrongly add a `Handler: Clone` bound,
// even though `Handler` is only used through `PhantomData`.
impl<Handler: BackendHandler> Clone for Group<Handler> {
    fn clone(&self) -> Self {
        let display_name = self.display_name.clone();
        let uuid = self.uuid.clone();
        let attributes = self.attributes.clone();
        let schema = self.schema.clone();
        Self {
            group_id: self.group_id,
            display_name,
            creation_date: self.creation_date,
            uuid,
            attributes,
            schema,
            _phantom: std::marker::PhantomData,
        }
    }
}
#[graphql_object(context = Context<Handler>)]
impl<Handler: BackendHandler> Group<Handler> {
    fn id(&self) -> i32 {
        self.group_id
    }
    fn display_name(&self) -> String {
        self.display_name.clone()
    }
    fn creation_date(&self) -> chrono::DateTime<chrono::Utc> {
        chrono::Utc.from_utc_datetime(&self.creation_date)
    }
    fn uuid(&self) -> String {
        self.uuid.clone()
    }
    /// User-defined attributes.
    fn attributes(&self) -> &[AttributeValue<Handler>] {
        &self.attributes
    }
    /// The users belonging to this group.
    async fn users(&self, context: &Context<Handler>) -> FieldResult<Vec<User<Handler>>> {
        let span = debug_span!("[GraphQL query] group::users");
        span.in_scope(|| {
            debug!(name = %self.display_name);
        });
        // Requires read access to the full user list.
        let handler = context
            .get_readonly_handler()
            .ok_or_else(field_error_callback(
                &span,
                "Unauthorized access to group data",
            ))?;
        let domain_users = handler
            .list_users(
                Some(DomainRequestFilter::MemberOfId(GroupId(self.group_id))),
                false,
            )
            .instrument(span)
            .await?;
        domain_users
            .into_iter()
            .map(|u| User::<Handler>::from_user_and_groups(u, self.schema.clone()))
            .collect()
    }
}
+539
View File
@@ -0,0 +1,539 @@
pub mod attribute;
pub mod filters;
pub mod group;
pub mod schema;
pub mod user;
// Re-export public types
pub use attribute::{AttributeSchema, AttributeValue, serialize_attribute_to_graphql};
pub use filters::{EqualityConstraint, RequestFilter};
pub use group::Group;
pub use schema::{AttributeList, ObjectClassInfo, Schema};
pub use user::User;
use juniper::{FieldResult, graphql_object};
use lldap_access_control::{ReadonlyBackendHandler, UserReadableBackendHandler};
use lldap_domain::public_schema::PublicSchema;
use lldap_domain::types::{GroupId, UserId};
use lldap_domain_handlers::handler::{BackendHandler, ReadSchemaBackendHandler};
use std::sync::Arc;
use tracing::{Instrument, Span, debug, debug_span};
use crate::api::{Context, field_error_callback};
#[derive(PartialEq, Eq, Debug)]
/// The top-level GraphQL query type.
pub struct Query<Handler: BackendHandler> {
    // Ties the query root to the backend handler without storing one.
    _phantom: std::marker::PhantomData<Box<Handler>>,
}
impl<Handler: BackendHandler> Default for Query<Handler> {
fn default() -> Self {
Self::new()
}
}
impl<Handler: BackendHandler> Query<Handler> {
    /// Create a new, stateless query root.
    pub fn new() -> Self {
        Self {
            _phantom: Default::default(),
        }
    }
}
#[graphql_object(context = Context<Handler>)]
impl<Handler: BackendHandler> Query<Handler> {
    /// Version of this GraphQL API.
    fn api_version() -> &'static str {
        "1.0"
    }
    /// Fetch a single user by (URL-encoded) id.
    pub async fn user(context: &Context<Handler>, user_id: String) -> FieldResult<User<Handler>> {
        use anyhow::Context;
        let span = debug_span!("[GraphQL query] user");
        span.in_scope(|| {
            debug!(?user_id);
        });
        // The id may arrive URL-encoded; decode before lookup.
        let user_id = urlencoding::decode(&user_id).context("Invalid user parameter")?;
        let user_id = UserId::new(&user_id);
        // Requires read access to that user (self or admin/reader).
        let handler = context
            .get_readable_handler(&user_id)
            .ok_or_else(field_error_callback(
                &span,
                "Unauthorized access to user data",
            ))?;
        let schema = Arc::new(self.get_schema(context, span.clone()).await?);
        let user = handler.get_user_details(&user_id).instrument(span).await?;
        User::<Handler>::from_user(user, schema)
    }
    /// List users, optionally restricted by a filter.
    async fn users(
        context: &Context<Handler>,
        #[graphql(name = "where")] filters: Option<RequestFilter>,
    ) -> FieldResult<Vec<User<Handler>>> {
        let span = debug_span!("[GraphQL query] users");
        span.in_scope(|| {
            debug!(?filters);
        });
        let handler = context
            .get_readonly_handler()
            .ok_or_else(field_error_callback(
                &span,
                "Unauthorized access to user list",
            ))?;
        let schema = Arc::new(self.get_schema(context, span.clone()).await?);
        let users = handler
            .list_users(
                filters
                    .map(|f| f.try_into_domain_filter(&schema))
                    .transpose()?,
                false,
            )
            .instrument(span)
            .await?;
        users
            .into_iter()
            .map(|u| User::<Handler>::from_user_and_groups(u, schema.clone()))
            .collect()
    }
    /// List all groups.
    async fn groups(context: &Context<Handler>) -> FieldResult<Vec<Group<Handler>>> {
        let span = debug_span!("[GraphQL query] groups");
        let handler = context
            .get_readonly_handler()
            .ok_or_else(field_error_callback(
                &span,
                "Unauthorized access to group list",
            ))?;
        let schema = Arc::new(self.get_schema(context, span.clone()).await?);
        let domain_groups = handler.list_groups(None).instrument(span).await?;
        domain_groups
            .into_iter()
            .map(|g| Group::<Handler>::from_group(g, schema.clone()))
            .collect()
    }
    /// Fetch a single group by id.
    async fn group(context: &Context<Handler>, group_id: i32) -> FieldResult<Group<Handler>> {
        let span = debug_span!("[GraphQL query] group");
        span.in_scope(|| {
            debug!(?group_id);
        });
        let handler = context
            .get_readonly_handler()
            .ok_or_else(field_error_callback(
                &span,
                "Unauthorized access to group data",
            ))?;
        let schema = Arc::new(self.get_schema(context, span.clone()).await?);
        let group_details = handler
            .get_group_details(GroupId(group_id))
            .instrument(span)
            .await?;
        Group::<Handler>::from_group_details(group_details, schema.clone())
    }
    /// The schema visible to the requesting user.
    async fn schema(context: &Context<Handler>) -> FieldResult<Schema<Handler>> {
        let span = debug_span!("[GraphQL query] get_schema");
        self.get_schema(context, span).await.map(Into::into)
    }
}
impl<Handler: BackendHandler> Query<Handler> {
async fn get_schema(
&self,
context: &Context<Handler>,
span: Span,
) -> FieldResult<PublicSchema> {
let handler = context
.handler
.get_user_restricted_lister_handler(&context.validation_result);
Ok(handler
.get_schema()
.instrument(span)
.await
.map(Into::<PublicSchema>::into)?)
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use chrono::TimeZone;
    use juniper::{
        DefaultScalarValue, EmptyMutation, EmptySubscription, GraphQLType, RootNode, Variables,
        execute, graphql_value,
    };
    use lldap_auth::access_control::{Permission, ValidationResults};
    use lldap_domain::schema::AttributeSchema as DomainAttributeSchema;
    use lldap_domain::types::{Attribute as DomainAttribute, GroupDetails, User as DomainUser};
    use lldap_domain::{
        schema::{AttributeList, Schema},
        types::{AttributeName, AttributeType, LdapObjectClass},
    };
    use lldap_domain_model::model::UserColumn;
    use lldap_test_utils::{MockTestBackendHandler, setup_default_schema};
    use mockall::predicate::eq;
    use pretty_assertions::assert_eq;
    use std::collections::HashSet;

    // Build a query-only GraphQL schema around the given query root.
    fn schema<'q, C, Q>(query_root: Q) -> RootNode<'q, Q, EmptyMutation<C>, EmptySubscription<C>>
    where
        Q: GraphQLType<DefaultScalarValue, Context = C, TypeInfo = ()> + 'q,
    {
        RootNode::new(
            query_root,
            EmptyMutation::<C>::new(),
            EmptySubscription::<C>::new(),
        )
    }

    // End-to-end `user` query, including nested groups and attributes.
    #[tokio::test]
    async fn get_user_by_id() {
        const QUERY: &str = r#"{
          user(userId: "bob") {
            id
            email
            creationDate
            firstName
            lastName
            uuid
            attributes {
              name
              value
            }
            groups {
              id
              displayName
              creationDate
              uuid
              attributes {
                name
                value
              }
            }
          }
        }"#;
        let mut mock = MockTestBackendHandler::new();
        mock.expect_get_schema().returning(|| {
            Ok(Schema {
                user_attributes: AttributeList {
                    attributes: vec![
                        DomainAttributeSchema {
                            name: "first_name".into(),
                            attribute_type: AttributeType::String,
                            is_list: false,
                            is_visible: true,
                            is_editable: true,
                            is_hardcoded: true,
                            is_readonly: false,
                        },
                        DomainAttributeSchema {
                            name: "last_name".into(),
                            attribute_type: AttributeType::String,
                            is_list: false,
                            is_visible: true,
                            is_editable: true,
                            is_hardcoded: true,
                            is_readonly: false,
                        },
                    ],
                },
                group_attributes: AttributeList {
                    attributes: vec![DomainAttributeSchema {
                        name: "club_name".into(),
                        attribute_type: AttributeType::String,
                        is_list: false,
                        is_visible: true,
                        is_editable: true,
                        is_hardcoded: false,
                        is_readonly: false,
                    }],
                },
                extra_user_object_classes: vec![
                    LdapObjectClass::from("customUserClass"),
                    LdapObjectClass::from("myUserClass"),
                ],
                extra_group_object_classes: vec![LdapObjectClass::from("customGroupClass")],
            })
        });
        mock.expect_get_user_details()
            .with(eq(UserId::new("bob")))
            .return_once(|_| {
                Ok(DomainUser {
                    user_id: UserId::new("bob"),
                    email: "bob@bobbers.on".into(),
                    display_name: None,
                    creation_date: chrono::Utc.timestamp_millis_opt(42).unwrap().naive_utc(),
                    modified_date: chrono::Utc.timestamp_opt(0, 0).unwrap().naive_utc(),
                    password_modified_date: chrono::Utc.timestamp_opt(0, 0).unwrap().naive_utc(),
                    uuid: lldap_domain::types::Uuid::from_name_and_date(
                        "bob",
                        &chrono::Utc.timestamp_millis_opt(42).unwrap().naive_utc(),
                    ),
                    attributes: vec![
                        DomainAttribute {
                            name: "first_name".into(),
                            value: "Bob".to_string().into(),
                        },
                        DomainAttribute {
                            name: "last_name".into(),
                            value: "Bobberson".to_string().into(),
                        },
                    ],
                })
            });
        let mut groups = HashSet::new();
        groups.insert(GroupDetails {
            group_id: GroupId(3),
            display_name: "Bobbersons".into(),
            creation_date: chrono::Utc.timestamp_nanos(42).naive_utc(),
            uuid: lldap_domain::types::Uuid::from_name_and_date(
                "Bobbersons",
                &chrono::Utc.timestamp_nanos(42).naive_utc(),
            ),
            attributes: vec![DomainAttribute {
                name: "club_name".into(),
                value: "Gang of Four".to_string().into(),
            }],
            modified_date: chrono::Utc.timestamp_nanos(42).naive_utc(),
        });
        groups.insert(GroupDetails {
            group_id: GroupId(7),
            display_name: "Jefferees".into(),
            creation_date: chrono::Utc.timestamp_nanos(12).naive_utc(),
            uuid: lldap_domain::types::Uuid::from_name_and_date(
                "Jefferees",
                &chrono::Utc.timestamp_nanos(12).naive_utc(),
            ),
            attributes: Vec::new(),
            modified_date: chrono::Utc.timestamp_nanos(12).naive_utc(),
        });
        mock.expect_get_user_groups()
            .with(eq(UserId::new("bob")))
            .return_once(|_| Ok(groups));
        let context = Context::<MockTestBackendHandler>::new_for_tests(
            mock,
            ValidationResults {
                user: UserId::new("admin"),
                permission: Permission::Admin,
            },
        );
        let schema = schema(Query::<MockTestBackendHandler>::new());
        let result = execute(QUERY, None, &schema, &Variables::new(), &context).await;
        assert!(result.is_ok(), "Query failed: {:?}", result);
    }

    // `users` query with an `any` filter; checks the filter translation and
    // the exact response payload.
    #[tokio::test]
    async fn list_users() {
        const QUERY: &str = r#"{
          users(filters: {
            any: [
              {eq: {
                field: "id"
                value: "bob"
              }},
              {eq: {
                field: "email"
                value: "robert@bobbers.on"
              }},
              {eq: {
                field: "firstName"
                value: "robert"
              }}
            ]}) {
            id
            email
          }
        }"#;
        let mut mock = MockTestBackendHandler::new();
        setup_default_schema(&mut mock);
        mock.expect_list_users()
            .with(
                eq(Some(lldap_domain_handlers::handler::UserRequestFilter::Or(
                    vec![
                        lldap_domain_handlers::handler::UserRequestFilter::UserId(UserId::new(
                            "bob",
                        )),
                        lldap_domain_handlers::handler::UserRequestFilter::Equality(
                            UserColumn::Email,
                            "robert@bobbers.on".to_owned(),
                        ),
                        lldap_domain_handlers::handler::UserRequestFilter::AttributeEquality(
                            AttributeName::from("first_name"),
                            "robert".to_string().into(),
                        ),
                    ],
                ))),
                eq(false),
            )
            .return_once(|_, _| {
                Ok(vec![
                    lldap_domain::types::UserAndGroups {
                        user: DomainUser {
                            user_id: UserId::new("bob"),
                            email: "bob@bobbers.on".into(),
                            display_name: None,
                            creation_date: chrono::Utc.timestamp_opt(0, 0).unwrap().naive_utc(),
                            modified_date: chrono::Utc.timestamp_opt(0, 0).unwrap().naive_utc(),
                            password_modified_date: chrono::Utc
                                .timestamp_opt(0, 0)
                                .unwrap()
                                .naive_utc(),
                            uuid: lldap_domain::types::Uuid::from_name_and_date(
                                "bob",
                                &chrono::Utc.timestamp_opt(0, 0).unwrap().naive_utc(),
                            ),
                            attributes: Vec::new(),
                        },
                        groups: None,
                    },
                    lldap_domain::types::UserAndGroups {
                        user: DomainUser {
                            user_id: UserId::new("robert"),
                            email: "robert@bobbers.on".into(),
                            display_name: None,
                            creation_date: chrono::Utc.timestamp_opt(0, 0).unwrap().naive_utc(),
                            modified_date: chrono::Utc.timestamp_opt(0, 0).unwrap().naive_utc(),
                            password_modified_date: chrono::Utc
                                .timestamp_opt(0, 0)
                                .unwrap()
                                .naive_utc(),
                            uuid: lldap_domain::types::Uuid::from_name_and_date(
                                "robert",
                                &chrono::Utc.timestamp_opt(0, 0).unwrap().naive_utc(),
                            ),
                            attributes: Vec::new(),
                        },
                        groups: None,
                    },
                ])
            });
        let context = Context::<MockTestBackendHandler>::new_for_tests(
            mock,
            ValidationResults {
                user: UserId::new("admin"),
                permission: Permission::Admin,
            },
        );
        let schema = schema(Query::<MockTestBackendHandler>::new());
        assert_eq!(
            execute(QUERY, None, &schema, &Variables::new(), &context).await,
            Ok((
                graphql_value!(
                {
                    "users": [
                        {
                            "id": "bob",
                            "email": "bob@bobbers.on"
                        },
                        {
                            "id": "robert",
                            "email": "robert@bobbers.on"
                        },
                    ]
                }),
                vec![]
            ))
        );
    }

    // The `schema` query exposes both user and group attribute schemas.
    #[tokio::test]
    async fn get_schema() {
        const QUERY: &str = r#"{
          schema {
            userSchema {
              attributes {
                name
                attributeType
                isList
                isVisible
                isEditable
                isHardcoded
              }
              extraLdapObjectClasses
            }
            groupSchema {
              attributes {
                name
                attributeType
                isList
                isVisible
                isEditable
                isHardcoded
              }
              extraLdapObjectClasses
            }
          }
        }"#;
        let mut mock = MockTestBackendHandler::new();
        setup_default_schema(&mut mock);
        let context = Context::<MockTestBackendHandler>::new_for_tests(
            mock,
            ValidationResults {
                user: UserId::new("admin"),
                permission: Permission::Admin,
            },
        );
        let schema = schema(Query::<MockTestBackendHandler>::new());
        let result = execute(QUERY, None, &schema, &Variables::new(), &context).await;
        assert!(result.is_ok(), "Query failed: {:?}", result);
    }

    // A non-admin user should still be able to run the schema query when it
    // contains attributes that are not visible to them.
    #[tokio::test]
    async fn regular_user_doesnt_see_non_visible_attributes() {
        const QUERY: &str = r#"{
          schema {
            userSchema {
              attributes {
                name
              }
              extraLdapObjectClasses
            }
          }
        }"#;
        let mut mock = MockTestBackendHandler::new();
        mock.expect_get_schema().times(1).return_once(|| {
            Ok(Schema {
                user_attributes: AttributeList {
                    attributes: vec![DomainAttributeSchema {
                        name: "invisible".into(),
                        attribute_type: AttributeType::JpegPhoto,
                        is_list: false,
                        is_visible: false,
                        is_editable: true,
                        is_hardcoded: true,
                        is_readonly: false,
                    }],
                },
                group_attributes: AttributeList {
                    attributes: Vec::new(),
                },
                extra_user_object_classes: vec![LdapObjectClass::from("customUserClass")],
                extra_group_object_classes: Vec::new(),
            })
        });
        let context = Context::<MockTestBackendHandler>::new_for_tests(
            mock,
            ValidationResults {
                user: UserId::new("bob"),
                permission: Permission::Regular,
            },
        );
        let schema = schema(Query::<MockTestBackendHandler>::new());
        let result = execute(QUERY, None, &schema, &Variables::new(), &context).await;
        assert!(result.is_ok(), "Query failed: {:?}", result);
    }
}
+117
View File
@@ -0,0 +1,117 @@
use juniper::graphql_object;
use lldap_domain::public_schema::PublicSchema;
use lldap_domain::schema::AttributeList as DomainAttributeList;
use lldap_domain::types::LdapObjectClass;
use lldap_domain_handlers::handler::BackendHandler;
use lldap_ldap::{get_default_group_object_classes, get_default_user_object_classes};
use serde::{Deserialize, Serialize};
use super::attribute::AttributeSchema;
use crate::api::Context;
#[derive(PartialEq, Eq, Debug, Serialize, Deserialize)]
/// One side of the schema (user or group): its attributes plus the LDAP
/// object classes, split between built-in defaults and user-added extras.
pub struct AttributeList<Handler: BackendHandler> {
    attributes: DomainAttributeList,
    // Built-in (hardcoded) LDAP object classes.
    default_classes: Vec<LdapObjectClass>,
    // User-defined extra LDAP object classes.
    extra_classes: Vec<LdapObjectClass>,
    _phantom: std::marker::PhantomData<Box<Handler>>,
}
/// An LDAP object class exposed through GraphQL, with a flag telling whether
/// it is built-in (hardcoded) or user-defined.
// Derives extended to match the other types in this module (which are at
// least `Debug` + `PartialEq`/`Eq`); purely additive, backward-compatible.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct ObjectClassInfo {
    /// The LDAP object class name.
    object_class: String,
    /// True for built-in classes, false for user-added ones.
    is_hardcoded: bool,
}
#[graphql_object]
impl ObjectClassInfo {
    /// The LDAP object class name.
    fn object_class(&self) -> &str {
        self.object_class.as_str()
    }
    /// Whether this class is built-in rather than user-defined.
    fn is_hardcoded(&self) -> bool {
        self.is_hardcoded
    }
}
#[graphql_object(context = Context<Handler>)]
impl<Handler: BackendHandler> AttributeList<Handler> {
    /// The attribute schemas, wrapped for GraphQL.
    fn attributes(&self) -> Vec<AttributeSchema<Handler>> {
        self.attributes
            .attributes
            .iter()
            .cloned()
            .map(Into::into)
            .collect()
    }
    /// Only the user-defined extra object classes, as plain strings.
    fn extra_ldap_object_classes(&self) -> Vec<String> {
        self.extra_classes
            .iter()
            .map(ToString::to_string)
            .collect()
    }
    /// All object classes: built-in ones first, then the user-defined extras.
    fn ldap_object_classes(&self) -> Vec<ObjectClassInfo> {
        let defaults = self.default_classes.iter().map(|c| ObjectClassInfo {
            object_class: c.to_string(),
            is_hardcoded: true,
        });
        let extras = self.extra_classes.iter().map(|c| ObjectClassInfo {
            object_class: c.to_string(),
            is_hardcoded: false,
        });
        defaults.chain(extras).collect()
    }
}
impl<Handler: BackendHandler> AttributeList<Handler> {
    /// Bundle a domain attribute list with its built-in and extra LDAP
    /// object classes.
    pub fn new(
        attributes: DomainAttributeList,
        default_classes: Vec<LdapObjectClass>,
        extra_classes: Vec<LdapObjectClass>,
    ) -> Self {
        Self {
            attributes,
            default_classes,
            extra_classes,
            _phantom: std::marker::PhantomData,
        }
    }
}
#[derive(PartialEq, Eq, Debug, Serialize, Deserialize)]
/// GraphQL wrapper around the public (user-visible) schema.
pub struct Schema<Handler: BackendHandler> {
    schema: PublicSchema,
    _phantom: std::marker::PhantomData<Box<Handler>>,
}
#[graphql_object(context = Context<Handler>)]
impl<Handler: BackendHandler> Schema<Handler> {
    /// The user attributes and user object classes.
    fn user_schema(&self) -> AttributeList<Handler> {
        let schema = self.schema.get_schema();
        AttributeList::<Handler>::new(
            schema.user_attributes.clone(),
            get_default_user_object_classes(),
            schema.extra_user_object_classes.clone(),
        )
    }
    /// The group attributes and group object classes.
    fn group_schema(&self) -> AttributeList<Handler> {
        let schema = self.schema.get_schema();
        AttributeList::<Handler>::new(
            schema.group_attributes.clone(),
            get_default_group_object_classes(),
            schema.extra_group_object_classes.clone(),
        )
    }
}
impl<Handler: BackendHandler> From<PublicSchema> for Schema<Handler> {
fn from(value: PublicSchema) -> Self {
Self {
schema: value,
_phantom: std::marker::PhantomData,
}
}
}
+136
View File
@@ -0,0 +1,136 @@
use chrono::TimeZone;
use juniper::{FieldResult, graphql_object};
use lldap_access_control::UserReadableBackendHandler;
use lldap_domain::public_schema::PublicSchema;
use lldap_domain::types::{User as DomainUser, UserAndGroups as DomainUserAndGroups};
use lldap_domain_handlers::handler::BackendHandler;
use serde::{Deserialize, Serialize};
use std::sync::Arc;
use tracing::{Instrument, debug, debug_span};
use super::attribute::AttributeValue;
use super::group::Group;
use crate::api::Context;
#[derive(PartialEq, Eq, Debug, Serialize, Deserialize)]
/// Represents a single user.
pub struct User<Handler: BackendHandler> {
    // The domain user; its `attributes` list has been moved into `attributes` below.
    user: DomainUser,
    // Attributes resolved against the schema (hardcoded + custom).
    attributes: Vec<AttributeValue<Handler>>,
    schema: Arc<PublicSchema>,
    // Pre-fetched groups, if the source already had them; otherwise resolved lazily.
    groups: Option<Vec<Group<Handler>>>,
    _phantom: std::marker::PhantomData<Box<Handler>>,
}
impl<Handler: BackendHandler> User<Handler> {
    /// Build a GraphQL user from a domain user, without group information.
    /// Consumes `user.attributes` (taken by the attribute builder).
    pub fn from_user(mut user: DomainUser, schema: Arc<PublicSchema>) -> FieldResult<Self> {
        let attributes = AttributeValue::<Handler>::user_attributes_from_schema(&mut user, &schema);
        Ok(Self {
            user,
            attributes,
            schema,
            groups: None,
            _phantom: std::marker::PhantomData,
        })
    }
}
impl<Handler: BackendHandler> User<Handler> {
    /// Build a GraphQL user, pre-filling the groups when they are already known
    /// (avoids a later backend query from the `groups` field).
    pub fn from_user_and_groups(
        DomainUserAndGroups { user, groups }: DomainUserAndGroups,
        schema: Arc<PublicSchema>,
    ) -> FieldResult<Self> {
        let mut user = Self::from_user(user, schema.clone())?;
        if let Some(groups) = groups {
            user.groups = Some(
                groups
                    .into_iter()
                    .map(|g| Group::<Handler>::from_group_details(g, schema.clone()))
                    .collect::<FieldResult<Vec<_>>>()?,
            );
        }
        Ok(user)
    }
}
#[graphql_object(context = Context<Handler>)]
impl<Handler: BackendHandler> User<Handler> {
    fn id(&self) -> &str {
        self.user.user_id.as_str()
    }
    fn email(&self) -> &str {
        self.user.email.as_str()
    }
    /// The display name, or the empty string if unset.
    fn display_name(&self) -> &str {
        self.user.display_name.as_deref().unwrap_or("")
    }
    /// The "first_name" attribute, or the empty string if unset.
    fn first_name(&self) -> &str {
        self.attributes
            .iter()
            .find(|a| a.name() == "first_name")
            .map(|a| a.attribute.value.as_str().unwrap_or_default())
            .unwrap_or_default()
    }
    /// The "last_name" attribute, or the empty string if unset.
    fn last_name(&self) -> &str {
        self.attributes
            .iter()
            .find(|a| a.name() == "last_name")
            .map(|a| a.attribute.value.as_str().unwrap_or_default())
            .unwrap_or_default()
    }
    /// The "avatar" attribute, serialized to a string, if set.
    fn avatar(&self) -> Option<String> {
        self.attributes
            .iter()
            .find(|a| a.name() == "avatar")
            .map(|a| {
                String::from(
                    a.attribute
                        .value
                        .as_jpeg_photo()
                        // A non-JPEG "avatar" value is treated as a DB invariant
                        // violation and panics.
                        .expect("Invalid JPEG returned by the DB"),
                )
            })
    }
    fn creation_date(&self) -> chrono::DateTime<chrono::Utc> {
        chrono::Utc.from_utc_datetime(&self.user.creation_date)
    }
    fn uuid(&self) -> &str {
        self.user.uuid.as_str()
    }
    /// User-defined attributes.
    fn attributes(&self) -> &[AttributeValue<Handler>] {
        &self.attributes
    }
    /// The groups to which this user belongs.
    async fn groups(&self, context: &Context<Handler>) -> FieldResult<Vec<Group<Handler>>> {
        // Use the pre-fetched groups when available.
        if let Some(groups) = &self.groups {
            return Ok(groups.clone());
        }
        let span = debug_span!("[GraphQL query] user::groups");
        span.in_scope(|| {
            debug!(user_id = ?self.user.user_id);
        });
        let handler = context
            .get_readable_handler(&self.user.user_id)
            .expect("We shouldn't be able to get there without readable permission");
        let domain_groups = handler
            .get_user_groups(&self.user.user_id)
            .instrument(span)
            .await?;
        // Sort by display name for a stable output order.
        let mut groups = domain_groups
            .into_iter()
            .map(|g| Group::<Handler>::from_group_details(g, self.schema.clone()))
            .collect::<FieldResult<Vec<Group<Handler>>>>()?;
        groups.sort_by(|g1, g2| g1.display_name.cmp(&g2.display_name));
        Ok(groups)
    }
}
+2 -1
View File
@@ -7,6 +7,7 @@ edition.workspace = true
homepage.workspace = true
license.workspace = true
repository.workspace = true
rust-version.workspace = true
[dependencies]
anyhow = "*"
@@ -63,4 +64,4 @@ version = "1.25"
[dev-dependencies.lldap_domain]
path = "../domain"
features = ["test"]
features = ["test"]
+2
View File
@@ -124,6 +124,7 @@ mod tests {
users: vec![UserId::new("bob")],
uuid: uuid!("04ac75e0-2900-3e21-926c-2f732c26b3fc"),
attributes: Vec::new(),
modified_date: chrono::Utc.timestamp_opt(42, 42).unwrap().naive_utc(),
}])
});
let ldap_handler = setup_bound_admin_handler(mock).await;
@@ -218,6 +219,7 @@ mod tests {
users: vec![UserId::new("bob")],
uuid: uuid!("04ac75e0-2900-3e21-926c-2f732c26b3fc"),
attributes: Vec::new(),
modified_date: chrono::Utc.timestamp_opt(42, 42).unwrap().naive_utc(),
}])
});
let ldap_handler = setup_bound_admin_handler(mock).await;
+362 -16
View File
@@ -72,6 +72,12 @@ pub fn get_group_attribute(
.to_rfc3339()
.into_bytes(),
],
GroupFieldType::ModifiedDate => vec![
chrono::Utc
.from_utc_datetime(&group.modified_date)
.to_rfc3339()
.into_bytes(),
],
GroupFieldType::Member => group
.users
.iter()
@@ -178,11 +184,11 @@ fn get_group_attribute_equality_filter(
]),
(Ok(_), Err(e)) => {
warn!("Invalid value for attribute {} (lowercased): {}", field, e);
GroupRequestFilter::from(false)
GroupRequestFilter::False
}
(Err(e), _) => {
warn!("Invalid value for attribute {}: {}", field, e);
GroupRequestFilter::from(false)
GroupRequestFilter::False
}
}
}
@@ -203,7 +209,7 @@ fn convert_group_filter(
.map(|id| GroupRequestFilter::GroupId(GroupId(id)))
.unwrap_or_else(|_| {
warn!("Given group id is not a valid integer: {}", value_lc);
GroupRequestFilter::from(false)
GroupRequestFilter::False
})),
GroupFieldType::DisplayName => Ok(GroupRequestFilter::DisplayName(value_lc.into())),
GroupFieldType::Uuid => Uuid::try_from(value_lc.as_str())
@@ -220,7 +226,7 @@ fn convert_group_filter(
.map(GroupRequestFilter::Member)
.unwrap_or_else(|e| {
warn!("Invalid member filter on group: {}", e);
GroupRequestFilter::from(false)
GroupRequestFilter::False
})),
GroupFieldType::ObjectClass => Ok(GroupRequestFilter::from(
get_default_group_object_classes()
@@ -240,7 +246,7 @@ fn convert_group_filter(
.map(GroupRequestFilter::DisplayName)
.unwrap_or_else(|_| {
warn!("Invalid dn filter on group: {}", value_lc);
GroupRequestFilter::from(false)
GroupRequestFilter::False
}))
}
GroupFieldType::NoMatch => {
@@ -251,7 +257,7 @@ fn convert_group_filter(
field
);
}
Ok(GroupRequestFilter::from(false))
Ok(GroupRequestFilter::False)
}
GroupFieldType::Attribute(field, typ, is_list) => Ok(
get_group_attribute_equality_filter(&field, typ, is_list, value),
@@ -260,23 +266,61 @@ fn convert_group_filter(
code: LdapResultCode::UnwillingToPerform,
message: "Creation date filter for groups not supported".to_owned(),
}),
GroupFieldType::ModifiedDate => Err(LdapError {
code: LdapResultCode::UnwillingToPerform,
message: "Modified date filter for groups not supported".to_owned(),
}),
}
}
LdapFilter::And(filters) => Ok(GroupRequestFilter::And(
filters.iter().map(rec).collect::<LdapResult<_>>()?,
)),
LdapFilter::Or(filters) => Ok(GroupRequestFilter::Or(
filters.iter().map(rec).collect::<LdapResult<_>>()?,
)),
LdapFilter::Not(filter) => Ok(GroupRequestFilter::Not(Box::new(rec(filter)?))),
LdapFilter::And(filters) => {
let res = filters
.iter()
.map(rec)
.filter(|f| !matches!(f, Ok(GroupRequestFilter::True)))
.flat_map(|f| match f {
Ok(GroupRequestFilter::And(v)) => v.into_iter().map(Ok).collect(),
f => vec![f],
})
.collect::<LdapResult<Vec<_>>>()?;
if res.is_empty() {
Ok(GroupRequestFilter::True)
} else if res.len() == 1 {
Ok(res.into_iter().next().unwrap())
} else {
Ok(GroupRequestFilter::And(res))
}
}
LdapFilter::Or(filters) => {
let res = filters
.iter()
.map(rec)
.filter(|c| !matches!(c, Ok(GroupRequestFilter::False)))
.flat_map(|f| match f {
Ok(GroupRequestFilter::Or(v)) => v.into_iter().map(Ok).collect(),
f => vec![f],
})
.collect::<LdapResult<Vec<_>>>()?;
if res.is_empty() {
Ok(GroupRequestFilter::False)
} else if res.len() == 1 {
Ok(res.into_iter().next().unwrap())
} else {
Ok(GroupRequestFilter::Or(res))
}
}
LdapFilter::Not(filter) => Ok(match rec(filter)? {
GroupRequestFilter::True => GroupRequestFilter::False,
GroupRequestFilter::False => GroupRequestFilter::True,
f => GroupRequestFilter::Not(Box::new(f)),
}),
LdapFilter::Present(field) => {
let field = AttributeName::from(field.as_str());
Ok(match map_group_field(&field, schema) {
GroupFieldType::Attribute(name, _, _) => {
GroupRequestFilter::CustomAttributePresent(name)
}
GroupFieldType::NoMatch => GroupRequestFilter::from(false),
_ => GroupRequestFilter::from(true),
GroupFieldType::NoMatch => GroupRequestFilter::False,
_ => GroupRequestFilter::True,
})
}
LdapFilter::Substring(field, substring_filter) => {
@@ -285,7 +329,7 @@ fn convert_group_filter(
GroupFieldType::DisplayName => Ok(GroupRequestFilter::DisplayNameSubString(
substring_filter.clone().into(),
)),
GroupFieldType::NoMatch => Ok(GroupRequestFilter::from(false)),
GroupFieldType::NoMatch => Ok(GroupRequestFilter::False),
_ => Err(LdapError {
code: LdapResultCode::UnwillingToPerform,
message: format!(
@@ -344,3 +388,305 @@ pub fn convert_groups_to_ldap_op<'a>(
))
})
}
// Unit tests for LDAP group searches: verifies that LDAP filters are converted
// into the expected `GroupRequestFilter` (including And/Or simplification and
// flattening) and that group entries are rendered with the right attributes.
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{
        handler::tests::{make_group_search_request, setup_bound_admin_handler},
        search::{make_search_request, make_search_success},
    };
    use ldap3_proto::proto::LdapSubstringFilter;
    use lldap_domain::{
        types::{GroupId, UserId},
        uuid,
    };
    use lldap_domain_handlers::handler::*;
    use lldap_test_utils::MockTestBackendHandler;
    use mockall::predicate::eq;
    use pretty_assertions::assert_eq;

    // An empty `And` filter simplifies to `GroupRequestFilter::True`; both
    // mocked groups come back with cn, entryDN, entryUuid, objectClass and
    // uniqueMember attributes (sorted by attribute name).
    #[tokio::test]
    async fn test_search_groups() {
        let mut mock = MockTestBackendHandler::new();
        mock.expect_list_groups()
            .with(eq(Some(GroupRequestFilter::True)))
            .times(1)
            .return_once(|_| {
                Ok(vec![
                    Group {
                        id: GroupId(1),
                        display_name: "group_1".into(),
                        creation_date: chrono::Utc.timestamp_opt(42, 42).unwrap().naive_utc(),
                        users: vec![UserId::new("bob"), UserId::new("john")],
                        uuid: uuid!("04ac75e0-2900-3e21-926c-2f732c26b3fc"),
                        attributes: Vec::new(),
                        modified_date: chrono::Utc.timestamp_opt(42, 42).unwrap().naive_utc(),
                    },
                    Group {
                        id: GroupId(3),
                        display_name: "BestGroup".into(),
                        creation_date: chrono::Utc.timestamp_opt(42, 42).unwrap().naive_utc(),
                        users: vec![UserId::new("john")],
                        uuid: uuid!("04ac75e0-2900-3e21-926c-2f732c26b3fc"),
                        attributes: Vec::new(),
                        modified_date: chrono::Utc.timestamp_opt(42, 42).unwrap().naive_utc(),
                    },
                ])
            });
        let ldap_handler = setup_bound_admin_handler(mock).await;
        let request = make_group_search_request(
            LdapFilter::And(vec![]),
            vec![
                "objectClass",
                "dn",
                "cn",
                "uniqueMember",
                "entryUuid",
                "entryDN",
            ],
        );
        assert_eq!(
            ldap_handler.do_search_or_dse(&request).await,
            Ok(vec![
                LdapOp::SearchResultEntry(LdapSearchResultEntry {
                    dn: "cn=group_1,ou=groups,dc=example,dc=com".to_string(),
                    attributes: vec![
                        LdapPartialAttribute {
                            atype: "cn".to_string(),
                            vals: vec![b"group_1".to_vec()]
                        },
                        LdapPartialAttribute {
                            atype: "entryDN".to_string(),
                            vals: vec![b"uid=group_1,ou=groups,dc=example,dc=com".to_vec()],
                        },
                        LdapPartialAttribute {
                            atype: "entryUuid".to_string(),
                            vals: vec![b"04ac75e0-2900-3e21-926c-2f732c26b3fc".to_vec()],
                        },
                        LdapPartialAttribute {
                            atype: "objectClass".to_string(),
                            vals: vec![b"groupOfUniqueNames".to_vec(), b"groupOfNames".to_vec()]
                        },
                        LdapPartialAttribute {
                            atype: "uniqueMember".to_string(),
                            vals: vec![
                                b"uid=bob,ou=people,dc=example,dc=com".to_vec(),
                                b"uid=john,ou=people,dc=example,dc=com".to_vec(),
                            ],
                        },
                    ],
                }),
                LdapOp::SearchResultEntry(LdapSearchResultEntry {
                    dn: "cn=BestGroup,ou=groups,dc=example,dc=com".to_string(),
                    attributes: vec![
                        LdapPartialAttribute {
                            atype: "cn".to_string(),
                            vals: vec![b"BestGroup".to_vec()]
                        },
                        LdapPartialAttribute {
                            atype: "entryDN".to_string(),
                            vals: vec![b"uid=BestGroup,ou=groups,dc=example,dc=com".to_vec()],
                        },
                        LdapPartialAttribute {
                            atype: "entryUuid".to_string(),
                            vals: vec![b"04ac75e0-2900-3e21-926c-2f732c26b3fc".to_vec()],
                        },
                        LdapPartialAttribute {
                            atype: "objectClass".to_string(),
                            vals: vec![b"groupOfUniqueNames".to_vec(), b"groupOfNames".to_vec()]
                        },
                        LdapPartialAttribute {
                            atype: "uniqueMember".to_string(),
                            vals: vec![b"uid=john,ou=people,dc=example,dc=com".to_vec()],
                        },
                    ],
                }),
                make_search_success(),
            ])
        );
    }

    // An equality filter on "groupid" maps straight to
    // `GroupRequestFilter::GroupId` with the parsed integer.
    #[tokio::test]
    async fn test_search_groups_by_groupid() {
        let mut mock = MockTestBackendHandler::new();
        mock.expect_list_groups()
            .with(eq(Some(GroupRequestFilter::GroupId(GroupId(1)))))
            .times(1)
            .return_once(|_| {
                Ok(vec![Group {
                    display_name: "group_1".into(),
                    id: GroupId(1),
                    creation_date: chrono::Utc.timestamp_opt(42, 42).unwrap().naive_utc(),
                    users: vec![],
                    uuid: uuid!("04ac75e0-2900-3e21-926c-2f732c26b3fc"),
                    attributes: Vec::new(),
                    modified_date: chrono::Utc.timestamp_opt(42, 42).unwrap().naive_utc(),
                }])
            });
        let ldap_handler = setup_bound_admin_handler(mock).await;
        let request = make_group_search_request(
            LdapFilter::Equality("groupid".to_string(), "1".to_string()),
            vec!["dn"],
        );
        assert_eq!(
            ldap_handler.do_search_or_dse(&request).await,
            Ok(vec![
                LdapOp::SearchResultEntry(LdapSearchResultEntry {
                    dn: "cn=group_1,ou=groups,dc=example,dc=com".to_string(),
                    attributes: vec![],
                }),
                make_search_success(),
            ])
        );
    }

    // A long `And` chain: always-true terms (matching objectClass equality,
    // Present on known fields, Not(Present) on an unknown one) are dropped by
    // the simplification; a dn equality pointing at ou=people and an unknown
    // attribute both become the always-false filter.
    #[tokio::test]
    async fn test_search_groups_filter() {
        let mut mock = MockTestBackendHandler::new();
        mock.expect_list_groups()
            .with(eq(Some(GroupRequestFilter::And(vec![
                GroupRequestFilter::DisplayName("group_1".into()),
                GroupRequestFilter::Member(UserId::new("bob")),
                GroupRequestFilter::DisplayName("rockstars".into()),
                false.into(),
                GroupRequestFilter::Uuid(uuid!("04ac75e0-2900-3e21-926c-2f732c26b3fc")),
                false.into(),
                GroupRequestFilter::DisplayNameSubString(SubStringFilter {
                    initial: Some("iNIt".to_owned()),
                    any: vec!["1".to_owned(), "2aA".to_owned()],
                    final_: Some("finAl".to_owned()),
                }),
            ]))))
            .times(1)
            .return_once(|_| {
                Ok(vec![Group {
                    display_name: "group_1".into(),
                    id: GroupId(1),
                    creation_date: chrono::Utc.timestamp_opt(42, 42).unwrap().naive_utc(),
                    users: vec![],
                    uuid: uuid!("04ac75e0-2900-3e21-926c-2f732c26b3fc"),
                    attributes: Vec::new(),
                    modified_date: chrono::Utc.timestamp_opt(42, 42).unwrap().naive_utc(),
                }])
            });
        let ldap_handler = setup_bound_admin_handler(mock).await;
        let request = make_group_search_request(
            LdapFilter::And(vec![
                LdapFilter::Equality("cN".to_string(), "Group_1".to_string()),
                LdapFilter::Equality(
                    "uniqueMember".to_string(),
                    "uid=bob,ou=peopLe,Dc=eXample,dc=com".to_string(),
                ),
                LdapFilter::Equality(
                    "dn".to_string(),
                    "uid=rockstars,ou=groups,dc=example,dc=com".to_string(),
                ),
                LdapFilter::Equality(
                    "dn".to_string(),
                    "uid=rockstars,ou=people,dc=example,dc=com".to_string(),
                ),
                LdapFilter::Equality(
                    "uuid".to_string(),
                    "04ac75e0-2900-3e21-926c-2f732c26b3fc".to_string(),
                ),
                LdapFilter::Equality("obJEctclass".to_string(), "groupofUniqueNames".to_string()),
                LdapFilter::Equality("objectclass".to_string(), "groupOfNames".to_string()),
                LdapFilter::Present("objectclass".to_string()),
                LdapFilter::Present("dn".to_string()),
                LdapFilter::Not(Box::new(LdapFilter::Present(
                    "random_attribUte".to_string(),
                ))),
                LdapFilter::Equality("unknown_attribute".to_string(), "randomValue".to_string()),
                LdapFilter::Substring(
                    "cn".to_owned(),
                    LdapSubstringFilter {
                        initial: Some("iNIt".to_owned()),
                        any: vec!["1".to_owned(), "2aA".to_owned()],
                        final_: Some("finAl".to_owned()),
                    },
                ),
            ]),
            vec!["1.1"],
        );
        assert_eq!(
            ldap_handler.do_search_or_dse(&request).await,
            Ok(vec![
                LdapOp::SearchResultEntry(LdapSearchResultEntry {
                    dn: "cn=group_1,ou=groups,dc=example,dc=com".to_string(),
                    attributes: vec![],
                }),
                make_search_success(),
            ])
        );
    }

    // A two-element `Or` is passed through unchanged (no simplification applies).
    #[tokio::test]
    async fn test_search_groups_filter_2() {
        let mut mock = MockTestBackendHandler::new();
        mock.expect_list_groups()
            .with(eq(Some(GroupRequestFilter::Or(vec![
                GroupRequestFilter::DisplayName("group_1".into()),
                GroupRequestFilter::Member(UserId::new("bob")),
            ]))))
            .times(1)
            .return_once(|_| Ok(vec![]));
        let ldap_handler = setup_bound_admin_handler(mock).await;
        let request = make_group_search_request(
            LdapFilter::Or(vec![
                LdapFilter::Equality("cn".to_string(), "group_1".to_string()),
                LdapFilter::Equality(
                    "member".to_string(),
                    "uid=bob,ou=people,dc=example,dc=com".to_string(),
                ),
            ]),
            vec!["cn"],
        );
        assert_eq!(
            ldap_handler.do_search_or_dse(&request).await,
            Ok(vec![make_search_success()])
        );
    }

    // `Not` over a non-constant inner filter is kept as a `Not` node.
    #[tokio::test]
    async fn test_search_groups_filter_3() {
        let mut mock = MockTestBackendHandler::new();
        mock.expect_list_groups()
            .with(eq(Some(GroupRequestFilter::Not(Box::new(
                GroupRequestFilter::DisplayName("group_1".into()),
            )))))
            .times(1)
            .return_once(|_| Ok(vec![]));
        let ldap_handler = setup_bound_admin_handler(mock).await;
        let request = make_group_search_request(
            LdapFilter::Not(Box::new(LdapFilter::Equality(
                "cn".to_string(),
                "group_1".to_string(),
            ))),
            vec!["cn"],
        );
        assert_eq!(
            ldap_handler.do_search_or_dse(&request).await,
            Ok(vec![make_search_success()])
        );
    }

    // Searching with a specific group DN as the base turns the scope into a
    // `DisplayName` filter; the empty And collapses away entirely.
    #[tokio::test]
    async fn test_search_group_as_scope() {
        let mut mock = MockTestBackendHandler::new();
        mock.expect_list_groups()
            .with(eq(Some(GroupRequestFilter::DisplayName("group_1".into()))))
            .times(1)
            .return_once(|_| Ok(vec![]));
        let ldap_handler = setup_bound_admin_handler(mock).await;
        let request = make_search_request(
            "cn=group_1,ou=groups,dc=example,dc=com",
            LdapFilter::And(vec![]),
            vec!["objectClass"],
        );
        assert_eq!(
            ldap_handler.do_search_or_dse(&request).await,
            Ok(vec![make_search_success()]),
        );
    }
}
+489 -23
View File
@@ -3,10 +3,10 @@ use crate::core::{
utils::{
ExpandedAttributes, LdapInfo, UserFieldType, expand_attribute_wildcards,
get_custom_attribute, get_group_id_from_distinguished_name_or_plain_name,
get_user_id_from_distinguished_name_or_plain_name, map_user_field,
get_user_id_from_distinguished_name_or_plain_name, map_user_field, to_generalized_time,
},
};
use chrono::TimeZone;
use ldap3_proto::{
LdapFilter, LdapPartialAttribute, LdapResultCode, LdapSearchResultEntry, proto::LdapOp,
};
@@ -87,12 +87,15 @@ pub fn get_user_attribute(
UserFieldType::PrimaryField(UserColumn::DisplayName) => {
vec![user.display_name.clone()?.into_bytes()]
}
UserFieldType::PrimaryField(UserColumn::CreationDate) => vec![
chrono::Utc
.from_utc_datetime(&user.creation_date)
.to_rfc3339()
.into_bytes(),
],
UserFieldType::PrimaryField(UserColumn::CreationDate) => {
vec![to_generalized_time(&user.creation_date)]
}
UserFieldType::PrimaryField(UserColumn::ModifiedDate) => {
vec![to_generalized_time(&user.modified_date)]
}
UserFieldType::PrimaryField(UserColumn::PasswordModifiedDate) => {
vec![to_generalized_time(&user.password_modified_date)]
}
UserFieldType::Attribute(attr, _, _) => get_custom_attribute(&user.attributes, &attr)?,
UserFieldType::NoMatch => match attribute.as_str() {
"1.1" => return None,
@@ -190,11 +193,11 @@ fn get_user_attribute_equality_filter(
]),
(Ok(_), Err(e)) => {
warn!("Invalid value for attribute {} (lowercased): {}", field, e);
UserRequestFilter::from(false)
UserRequestFilter::False
}
(Err(e), _) => {
warn!("Invalid value for attribute {}: {}", field, e);
UserRequestFilter::from(false)
UserRequestFilter::False
}
}
}
@@ -206,13 +209,47 @@ fn convert_user_filter(
) -> LdapResult<UserRequestFilter> {
let rec = |f| convert_user_filter(ldap_info, f, schema);
match filter {
LdapFilter::And(filters) => Ok(UserRequestFilter::And(
filters.iter().map(rec).collect::<LdapResult<_>>()?,
)),
LdapFilter::Or(filters) => Ok(UserRequestFilter::Or(
filters.iter().map(rec).collect::<LdapResult<_>>()?,
)),
LdapFilter::Not(filter) => Ok(UserRequestFilter::Not(Box::new(rec(filter)?))),
LdapFilter::And(filters) => {
let res = filters
.iter()
.map(rec)
.filter(|c| !matches!(c, Ok(UserRequestFilter::True)))
.flat_map(|f| match f {
Ok(UserRequestFilter::And(v)) => v.into_iter().map(Ok).collect(),
f => vec![f],
})
.collect::<LdapResult<Vec<_>>>()?;
if res.is_empty() {
Ok(UserRequestFilter::True)
} else if res.len() == 1 {
Ok(res.into_iter().next().unwrap())
} else {
Ok(UserRequestFilter::And(res))
}
}
LdapFilter::Or(filters) => {
let res = filters
.iter()
.map(rec)
.filter(|c| !matches!(c, Ok(UserRequestFilter::False)))
.flat_map(|f| match f {
Ok(UserRequestFilter::Or(v)) => v.into_iter().map(Ok).collect(),
f => vec![f],
})
.collect::<LdapResult<Vec<_>>>()?;
if res.is_empty() {
Ok(UserRequestFilter::False)
} else if res.len() == 1 {
Ok(res.into_iter().next().unwrap())
} else {
Ok(UserRequestFilter::Or(res))
}
}
LdapFilter::Not(filter) => Ok(match rec(filter)? {
UserRequestFilter::True => UserRequestFilter::False,
UserRequestFilter::False => UserRequestFilter::True,
f => UserRequestFilter::Not(Box::new(f)),
}),
LdapFilter::Equality(field, value) => {
let field = AttributeName::from(field.as_str());
let value_lc = value.to_ascii_lowercase();
@@ -224,6 +261,21 @@ fn convert_user_filter(
UserColumn::LowercaseEmail,
value_lc,
)),
UserFieldType::PrimaryField(UserColumn::DisplayName) => {
// DisplayName (cn) should match case-insensitively, so we try both
// the original value and the lowercase value (if different)
if value.as_str() == value_lc {
Ok(UserRequestFilter::Equality(
UserColumn::DisplayName,
value_lc,
))
} else {
Ok(UserRequestFilter::Or(vec![
UserRequestFilter::Equality(UserColumn::DisplayName, value.to_string()),
UserRequestFilter::Equality(UserColumn::DisplayName, value_lc),
]))
}
}
UserFieldType::PrimaryField(field) => {
Ok(UserRequestFilter::Equality(field, value_lc))
}
@@ -238,7 +290,7 @@ fn convert_user_filter(
field
);
}
Ok(UserRequestFilter::from(false))
Ok(UserRequestFilter::False)
}
UserFieldType::ObjectClass => Ok(UserRequestFilter::from(
get_default_user_object_classes()
@@ -257,7 +309,7 @@ fn convert_user_filter(
.map(UserRequestFilter::MemberOf)
.unwrap_or_else(|e| {
warn!("Invalid memberOf filter: {}", e);
UserRequestFilter::from(false)
UserRequestFilter::False
})),
UserFieldType::EntryDn | UserFieldType::Dn => {
Ok(get_user_id_from_distinguished_name_or_plain_name(
@@ -268,7 +320,7 @@ fn convert_user_filter(
.map(UserRequestFilter::UserId)
.unwrap_or_else(|_| {
warn!("Invalid dn filter on user: {}", value_lc);
UserRequestFilter::from(false)
UserRequestFilter::False
}))
}
}
@@ -279,8 +331,8 @@ fn convert_user_filter(
UserFieldType::Attribute(name, _, _) => {
UserRequestFilter::CustomAttributePresent(name)
}
UserFieldType::NoMatch => UserRequestFilter::from(false),
_ => UserRequestFilter::from(true),
UserFieldType::NoMatch => UserRequestFilter::False,
_ => UserRequestFilter::True,
})
}
LdapFilter::Substring(field, substring_filter) => {
@@ -299,7 +351,7 @@ fn convert_user_filter(
code: LdapResultCode::UnwillingToPerform,
message: format!("Unsupported user attribute for substring filter: {field:?}"),
}),
UserFieldType::NoMatch => Ok(UserRequestFilter::from(false)),
UserFieldType::NoMatch => Ok(UserRequestFilter::False),
UserFieldType::PrimaryField(UserColumn::Email) => Ok(UserRequestFilter::SubString(
UserColumn::LowercaseEmail,
substring_filter.clone().into(),
@@ -363,3 +415,417 @@ pub fn convert_users_to_ldap_op<'a>(
))
})
}
// Unit tests for LDAP user searches: filter conversion (including the And/Or
// simplification and the case-insensitive cn equality), attribute rendering,
// and GeneralizedTime formatting of timestamp attributes.
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{
        handler::tests::{
            make_user_search_request, setup_bound_admin_handler, setup_bound_handler_with_group,
            setup_bound_readonly_handler,
        },
        search::{make_search_request, make_search_success},
    };
    use chrono::{DateTime, Duration, NaiveDateTime, TimeZone, Utc};
    use lldap_domain::types::{Attribute, GroupDetails, JpegPhoto};
    use lldap_test_utils::MockTestBackendHandler;
    use mockall::predicate::eq;
    use pretty_assertions::assert_eq;

    // Parses `timestamp_bytes` as GeneralizedTime (%Y%m%d%H%M%SZ) and asserts
    // it is within `time_margin` of `base_timestamp_dt`.
    fn assert_timestamp_within_margin(
        timestamp_bytes: &[u8],
        base_timestamp_dt: DateTime<Utc>,
        time_margin: Duration,
    ) {
        let timestamp_str =
            std::str::from_utf8(timestamp_bytes).expect("Invalid conversion from UTF-8 to string");
        let timestamp_naive = NaiveDateTime::parse_from_str(timestamp_str, "%Y%m%d%H%M%SZ")
            .expect("Invalid timestamp format");
        let timestamp_dt: DateTime<Utc> = Utc.from_utc_datetime(&timestamp_naive);
        let within_range = (base_timestamp_dt - timestamp_dt).abs() <= time_margin;
        assert!(
            within_range,
            "Timestamp not within range: expected within [{} - {}], got [{}]",
            base_timestamp_dt - time_margin,
            base_timestamp_dt + time_margin,
            timestamp_dt
        );
    }

    // A non-admin ("regular") user searching themselves: the access-control
    // layer adds a `UserId` restriction next to the (simplified) True filter.
    #[tokio::test]
    async fn test_search_regular_user() {
        let mut mock = MockTestBackendHandler::new();
        mock.expect_list_users()
            .with(
                eq(Some(UserRequestFilter::And(vec![
                    UserRequestFilter::True,
                    UserRequestFilter::UserId(UserId::new("test")),
                ]))),
                eq(false),
            )
            .times(1)
            .return_once(|_, _| {
                Ok(vec![UserAndGroups {
                    user: User {
                        user_id: UserId::new("test"),
                        ..Default::default()
                    },
                    groups: None,
                }])
            });
        let ldap_handler = setup_bound_handler_with_group(mock, "regular").await;
        let request =
            make_user_search_request::<String>(LdapFilter::And(vec![]), vec!["1.1".to_string()]);
        assert_eq!(
            ldap_handler.do_search_or_dse(&request).await,
            Ok(vec![
                LdapOp::SearchResultEntry(LdapSearchResultEntry {
                    dn: "uid=test,ou=people,dc=example,dc=com".to_string(),
                    attributes: vec![],
                }),
                make_search_success()
            ]),
        );
    }

    // A readonly user gets the plain simplified True filter (no restriction).
    #[tokio::test]
    async fn test_search_readonly_user() {
        let mut mock = MockTestBackendHandler::new();
        mock.expect_list_users()
            .with(eq(Some(UserRequestFilter::True)), eq(false))
            .times(1)
            .return_once(|_, _| Ok(vec![]));
        let ldap_handler = setup_bound_readonly_handler(mock).await;
        let request = make_user_search_request(LdapFilter::And(vec![]), vec!["1.1"]);
        assert_eq!(
            ldap_handler.do_search_or_dse(&request).await,
            Ok(vec![make_search_success()]),
        );
    }

    // Requesting "memberOf" triggers a group-aware listing (second arg true)
    // and renders each group membership as a group DN.
    #[tokio::test]
    async fn test_search_member_of() {
        let mut mock = MockTestBackendHandler::new();
        mock.expect_list_users()
            .with(eq(Some(UserRequestFilter::True)), eq(true))
            .times(1)
            .return_once(|_, _| {
                Ok(vec![UserAndGroups {
                    user: User {
                        user_id: UserId::new("bob"),
                        ..Default::default()
                    },
                    groups: Some(vec![GroupDetails {
                        group_id: lldap_domain::types::GroupId(42),
                        display_name: "rockstars".into(),
                        creation_date: chrono::Utc.timestamp_opt(42, 42).unwrap().naive_utc(),
                        uuid: lldap_domain::uuid!("a1a2a3a4b1b2c1c2d1d2d3d4d5d6d7d8"),
                        attributes: Vec::new(),
                        modified_date: chrono::Utc.timestamp_opt(42, 42).unwrap().naive_utc(),
                    }]),
                }])
            });
        let ldap_handler = setup_bound_readonly_handler(mock).await;
        let request = make_user_search_request::<String>(
            LdapFilter::And(vec![]),
            vec!["memberOf".to_string()],
        );
        assert_eq!(
            ldap_handler.do_search_or_dse(&request).await,
            Ok(vec![
                LdapOp::SearchResultEntry(LdapSearchResultEntry {
                    dn: "uid=bob,ou=people,dc=example,dc=com".to_string(),
                    attributes: vec![LdapPartialAttribute {
                        atype: "memberOf".to_string(),
                        vals: vec![b"cn=rockstars,ou=groups,dc=example,dc=com".to_vec()]
                    }],
                }),
                make_search_success(),
            ]),
        );
    }

    // A specific user DN used as the search base becomes a `UserId` filter;
    // the empty And collapses away entirely.
    #[tokio::test]
    async fn test_search_user_as_scope() {
        let mut mock = MockTestBackendHandler::new();
        mock.expect_list_users()
            .with(
                eq(Some(UserRequestFilter::UserId(UserId::new("bob")))),
                eq(false),
            )
            .times(1)
            .return_once(|_, _| Ok(vec![]));
        let ldap_handler = setup_bound_admin_handler(mock).await;
        let request = make_search_request(
            "uid=bob,ou=people,dc=example,dc=com",
            LdapFilter::And(vec![]),
            vec!["objectClass"],
        );
        assert_eq!(
            ldap_handler.do_search_or_dse(&request).await,
            Ok(vec![make_search_success()]),
        );
    }

    // End-to-end attribute rendering for two users, including custom
    // attributes, jpegPhoto, and createTimestamp in GeneralizedTime (the
    // default epoch creation date renders as 19700101000000Z).
    #[tokio::test]
    async fn test_search_users() {
        use chrono::prelude::*;
        use lldap_domain::uuid;
        let mut mock = MockTestBackendHandler::new();
        mock.expect_list_users().times(1).return_once(|_, _| {
            Ok(vec![
                UserAndGroups {
                    user: User {
                        user_id: UserId::new("bob_1"),
                        email: "bob@bobmail.bob".into(),
                        display_name: Some("Bôb Böbberson".to_string()),
                        uuid: uuid!("698e1d5f-7a40-3151-8745-b9b8a37839da"),
                        attributes: vec![
                            Attribute {
                                name: "first_name".into(),
                                value: "Bôb".to_string().into(),
                            },
                            Attribute {
                                name: "last_name".into(),
                                value: "Böbberson".to_string().into(),
                            },
                        ],
                        ..Default::default()
                    },
                    groups: None,
                },
                UserAndGroups {
                    user: User {
                        user_id: UserId::new("jim"),
                        email: "jim@cricket.jim".into(),
                        display_name: Some("Jimminy Cricket".to_string()),
                        attributes: vec![
                            Attribute {
                                name: "avatar".into(),
                                value: JpegPhoto::for_tests().into(),
                            },
                            Attribute {
                                name: "first_name".into(),
                                value: "Jim".to_string().into(),
                            },
                            Attribute {
                                name: "last_name".into(),
                                value: "Cricket".to_string().into(),
                            },
                        ],
                        uuid: uuid!("04ac75e0-2900-3e21-926c-2f732c26b3fc"),
                        creation_date: Utc
                            .with_ymd_and_hms(2014, 7, 8, 9, 10, 11)
                            .unwrap()
                            .naive_utc(),
                        modified_date: Utc
                            .with_ymd_and_hms(2014, 7, 8, 9, 10, 11)
                            .unwrap()
                            .naive_utc(),
                        password_modified_date: Utc
                            .with_ymd_and_hms(2014, 7, 8, 9, 10, 11)
                            .unwrap()
                            .naive_utc(),
                    },
                    groups: None,
                },
            ])
        });
        let ldap_handler = setup_bound_admin_handler(mock).await;
        let request = make_user_search_request(
            LdapFilter::And(vec![]),
            vec![
                "objectClass",
                "dn",
                "uid",
                "mail",
                "givenName",
                "sn",
                "cn",
                "createTimestamp",
                "entryUuid",
                "jpegPhoto",
            ],
        );
        assert_eq!(
            ldap_handler.do_search_or_dse(&request).await,
            Ok(vec![
                LdapOp::SearchResultEntry(LdapSearchResultEntry {
                    dn: "uid=bob_1,ou=people,dc=example,dc=com".to_string(),
                    attributes: vec![
                        LdapPartialAttribute {
                            atype: "cn".to_string(),
                            vals: vec!["Bôb Böbberson".to_string().into_bytes()]
                        },
                        LdapPartialAttribute {
                            atype: "createTimestamp".to_string(),
                            vals: vec![b"19700101000000Z".to_vec()]
                        },
                        LdapPartialAttribute {
                            atype: "entryUuid".to_string(),
                            vals: vec![b"698e1d5f-7a40-3151-8745-b9b8a37839da".to_vec()]
                        },
                        LdapPartialAttribute {
                            atype: "givenName".to_string(),
                            vals: vec!["Bôb".to_string().into_bytes()]
                        },
                        LdapPartialAttribute {
                            atype: "mail".to_string(),
                            vals: vec![b"bob@bobmail.bob".to_vec()]
                        },
                        LdapPartialAttribute {
                            atype: "objectClass".to_string(),
                            vals: vec![
                                b"inetOrgPerson".to_vec(),
                                b"posixAccount".to_vec(),
                                b"mailAccount".to_vec(),
                                b"person".to_vec(),
                                b"customUserClass".to_vec(),
                            ]
                        },
                        LdapPartialAttribute {
                            atype: "sn".to_string(),
                            vals: vec!["Böbberson".to_string().into_bytes()]
                        },
                        LdapPartialAttribute {
                            atype: "uid".to_string(),
                            vals: vec![b"bob_1".to_vec()]
                        },
                    ],
                }),
                LdapOp::SearchResultEntry(LdapSearchResultEntry {
                    dn: "uid=jim,ou=people,dc=example,dc=com".to_string(),
                    attributes: vec![
                        LdapPartialAttribute {
                            atype: "cn".to_string(),
                            vals: vec![b"Jimminy Cricket".to_vec()]
                        },
                        LdapPartialAttribute {
                            atype: "createTimestamp".to_string(),
                            vals: vec![b"20140708091011Z".to_vec()]
                        },
                        LdapPartialAttribute {
                            atype: "entryUuid".to_string(),
                            vals: vec![b"04ac75e0-2900-3e21-926c-2f732c26b3fc".to_vec()]
                        },
                        LdapPartialAttribute {
                            atype: "givenName".to_string(),
                            vals: vec![b"Jim".to_vec()]
                        },
                        LdapPartialAttribute {
                            atype: "jpegPhoto".to_string(),
                            vals: vec![JpegPhoto::for_tests().into_bytes()]
                        },
                        LdapPartialAttribute {
                            atype: "mail".to_string(),
                            vals: vec![b"jim@cricket.jim".to_vec()]
                        },
                        LdapPartialAttribute {
                            atype: "objectClass".to_string(),
                            vals: vec![
                                b"inetOrgPerson".to_vec(),
                                b"posixAccount".to_vec(),
                                b"mailAccount".to_vec(),
                                b"person".to_vec(),
                                b"customUserClass".to_vec(),
                            ]
                        },
                        LdapPartialAttribute {
                            atype: "sn".to_string(),
                            vals: vec![b"Cricket".to_vec()]
                        },
                        LdapPartialAttribute {
                            atype: "uid".to_string(),
                            vals: vec![b"jim".to_vec()]
                        },
                    ],
                }),
                make_search_success(),
            ])
        );
    }

    // pwdChangedTime must be emitted as GeneralizedTime and match the mocked
    // password_modified_date (checked via the parsing helper above).
    #[tokio::test]
    async fn test_pwd_changed_time_format() {
        use lldap_domain::uuid;
        let mut mock = MockTestBackendHandler::new();
        mock.expect_list_users().times(1).return_once(|_, _| {
            Ok(vec![UserAndGroups {
                user: User {
                    user_id: UserId::new("bob_1"),
                    email: "bob@bobmail.bob".into(),
                    uuid: uuid!("698e1d5f-7a40-3151-8745-b9b8a37839da"),
                    attributes: vec![],
                    password_modified_date: Utc
                        .with_ymd_and_hms(2014, 7, 8, 9, 10, 11)
                        .unwrap()
                        .naive_utc(),
                    ..Default::default()
                },
                groups: None,
            }])
        });
        let ldap_handler = setup_bound_admin_handler(mock).await;
        let request = make_user_search_request(LdapFilter::And(vec![]), vec!["pwdChangedTime"]);
        if let LdapOp::SearchResultEntry(entry) =
            &ldap_handler.do_search_or_dse(&request).await.unwrap()[0]
        {
            assert_eq!(entry.attributes.len(), 1);
            assert_eq!(entry.attributes[0].atype, "pwdChangedTime");
            assert_eq!(entry.attributes[0].vals.len(), 1);
            assert_timestamp_within_margin(
                &entry.attributes[0].vals[0],
                Utc.with_ymd_and_hms(2014, 7, 8, 9, 10, 11).unwrap(),
                Duration::seconds(1),
            );
        } else {
            panic!("Expected SearchResultEntry");
        }
    }

    // A cn equality with mixed case expands into an Or over the original and
    // lowercased values so that DisplayName matches case-insensitively.
    #[tokio::test]
    async fn test_search_cn_case_insensitive() {
        use lldap_domain::uuid;
        let mut mock = MockTestBackendHandler::new();
        mock.expect_list_users()
            .with(
                eq(Some(UserRequestFilter::Or(vec![
                    UserRequestFilter::Equality(UserColumn::DisplayName, "TestAll".to_string()),
                    UserRequestFilter::Equality(UserColumn::DisplayName, "testall".to_string()),
                ]))),
                eq(false),
            )
            .times(1)
            .return_once(|_, _| {
                Ok(vec![UserAndGroups {
                    user: User {
                        user_id: UserId::new("testall"),
                        email: "test@example.com".into(),
                        display_name: Some("TestAll".to_string()),
                        uuid: uuid!("698e1d5f-7a40-3151-8745-b9b8a37839da"),
                        attributes: vec![],
                        ..Default::default()
                    },
                    groups: None,
                }])
            });
        let ldap_handler = setup_bound_admin_handler(mock).await;
        let request = make_user_search_request(
            LdapFilter::Equality("cn".to_string(), "TestAll".to_string()),
            vec!["cn", "uid"],
        );
        let results = ldap_handler.do_search_or_dse(&request).await.unwrap();
        assert_eq!(results.len(), 2);
        if let LdapOp::SearchResultEntry(entry) = &results[0] {
            assert_eq!(entry.dn, "uid=testall,ou=people,dc=example,dc=com");
            assert_eq!(entry.attributes.len(), 2);
            assert_eq!(entry.attributes[0].atype, "cn");
            assert_eq!(entry.attributes[0].vals[0], b"TestAll");
        } else {
            panic!("Expected SearchResultEntry");
        }
    }
}
+50 -13
View File
@@ -3,7 +3,7 @@ use crate::core::{
group::{REQUIRED_GROUP_ATTRIBUTES, get_default_group_object_classes},
user::{REQUIRED_USER_ATTRIBUTES, get_default_user_object_classes},
};
use chrono::TimeZone;
use chrono::{NaiveDateTime, TimeZone};
use itertools::join;
use ldap3_proto::LdapResultCode;
use lldap_domain::{
@@ -18,6 +18,16 @@ use lldap_domain_model::model::UserColumn;
use std::collections::BTreeMap;
use tracing::{debug, instrument, warn};
/// Renders a `NaiveDateTime` as an LDAP GeneralizedTime byte string
/// (`YYYYMMDDHHMMSSZ`), the representation LDAP requires for timestamp
/// attributes such as `pwdChangedTime` or `createTimestamp`.
pub fn to_generalized_time(dt: &NaiveDateTime) -> Vec<u8> {
    // Stored timestamps are naive UTC; attaching the Utc timezone keeps the
    // wall-clock value unchanged, and the literal trailing 'Z' in the format
    // string marks the zone explicitly.
    let utc_time = chrono::Utc.from_utc_datetime(dt);
    let formatted = utc_time.format("%Y%m%d%H%M%SZ").to_string();
    formatted.into_bytes()
}
fn make_dn_pair<I>(mut iter: I) -> LdapResult<(String, String)>
where
I: Iterator<Item = String>,
@@ -239,9 +249,15 @@ pub fn map_user_field(field: &AttributeName, schema: &PublicSchema) -> UserField
AttributeType::JpegPhoto,
false,
),
"creationdate" | "createtimestamp" | "modifytimestamp" | "creation_date" => {
"creationdate" | "createtimestamp" | "creation_date" => {
UserFieldType::PrimaryField(UserColumn::CreationDate)
}
"modifytimestamp" | "modifydate" | "modified_date" => {
UserFieldType::PrimaryField(UserColumn::ModifiedDate)
}
"pwdchangedtime" | "passwordmodifydate" | "password_modified_date" => {
UserFieldType::PrimaryField(UserColumn::PasswordModifiedDate)
}
"entryuuid" | "uuid" => UserFieldType::PrimaryField(UserColumn::Uuid),
_ => schema
.get_schema()
@@ -257,6 +273,7 @@ pub enum GroupFieldType {
GroupId,
DisplayName,
CreationDate,
ModifiedDate,
ObjectClass,
Dn,
// Like Dn, but returned as part of the attributes.
@@ -272,9 +289,8 @@ pub fn map_group_field(field: &AttributeName, schema: &PublicSchema) -> GroupFie
"entrydn" => GroupFieldType::EntryDn,
"objectclass" => GroupFieldType::ObjectClass,
"cn" | "displayname" | "uid" | "display_name" | "id" => GroupFieldType::DisplayName,
"creationdate" | "createtimestamp" | "modifytimestamp" | "creation_date" => {
GroupFieldType::CreationDate
}
"creationdate" | "createtimestamp" | "creation_date" => GroupFieldType::CreationDate,
"modifytimestamp" | "modifydate" | "modified_date" => GroupFieldType::ModifiedDate,
"member" | "uniquemember" => GroupFieldType::Member,
"entryuuid" | "uuid" => GroupFieldType::Uuid,
"group_id" | "groupid" => GroupFieldType::GroupId,
@@ -294,16 +310,27 @@ pub struct LdapInfo {
pub ignored_group_attributes: Vec<AttributeName>,
}
impl LdapInfo {
    /// Builds the shared LDAP configuration from the configured base DN and
    /// the lists of user/group attributes to ignore.
    ///
    /// The base DN is lowercased, parsed into its RDN components (returning
    /// an `Err` on a malformed DN), and re-serialized into a canonical
    /// `k=v,k=v` string — see `test_whitespace_in_ldap_info`, which shows
    /// stray whitespace being stripped in the process.
    pub fn new(
        base_dn: &str,
        ignored_user_attributes: Vec<AttributeName>,
        ignored_group_attributes: Vec<AttributeName>,
    ) -> LdapResult<Self> {
        // Validation and component splitting happen in parse_distinguished_name.
        let base_dn = parse_distinguished_name(&base_dn.to_ascii_lowercase())?;
        // Canonical string form used for DN comparisons and rendering.
        let base_dn_str = join(base_dn.iter().map(|(k, v)| format!("{k}={v}")), ",");
        Ok(Self {
            base_dn,
            base_dn_str,
            ignored_user_attributes,
            ignored_group_attributes,
        })
    }
}
pub fn get_custom_attribute(
attributes: &[Attribute],
attribute_name: &AttributeName,
) -> Option<Vec<Vec<u8>>> {
let convert_date = |date| {
chrono::Utc
.from_utc_datetime(date)
.to_rfc3339()
.into_bytes()
};
attributes
.iter()
.find(|a| &a.name == attribute_name)
@@ -329,9 +356,9 @@ pub fn get_custom_attribute(
AttributeValue::JpegPhoto(Cardinality::Unbounded(l)) => {
l.iter().map(|p| p.clone().into_bytes()).collect()
}
AttributeValue::DateTime(Cardinality::Singleton(dt)) => vec![convert_date(dt)],
AttributeValue::DateTime(Cardinality::Singleton(dt)) => vec![to_generalized_time(dt)],
AttributeValue::DateTime(Cardinality::Unbounded(l)) => {
l.iter().map(convert_date).collect()
l.iter().map(to_generalized_time).collect()
}
})
}
@@ -515,4 +542,14 @@ mod tests {
parsed_dn
);
}
// Regression test: LdapInfo::new must canonicalize a base DN containing
// stray whitespace (including a trailing newline) into a compact
// `k=v,k=v` string.
#[test]
fn test_whitespace_in_ldap_info() {
    assert_eq!(
        LdapInfo::new(" ou=people, dc =example, dc=com \n", vec![], vec![])
            .unwrap()
            .base_dn_str,
        "ou=people,dc=example,dc=com"
    );
}
}
+2
View File
@@ -154,6 +154,7 @@ mod tests {
uuid: uuid!("a1a2a3a4b1b2c1c2d1d2d3d4d5d6d7d8"),
users: Vec::new(),
attributes: Vec::new(),
modified_date: chrono::Utc.timestamp_opt(42, 42).unwrap().naive_utc(),
}])
});
mock.expect_delete_group()
@@ -284,6 +285,7 @@ mod tests {
uuid: uuid!("a1a2a3a4b1b2c1c2d1d2d3d4d5d6d7d8"),
users: Vec::new(),
attributes: Vec::new(),
modified_date: chrono::Utc.timestamp_opt(42, 42).unwrap().naive_utc(),
}])
});
mock.expect_delete_group()
+15 -24
View File
@@ -2,7 +2,7 @@ use crate::{
compare,
core::{
error::{LdapError, LdapResult},
utils::{LdapInfo, parse_distinguished_name},
utils::LdapInfo,
},
create, delete, modify,
password::{self, do_password_modification},
@@ -18,7 +18,7 @@ use ldap3_proto::proto::{
};
use lldap_access_control::AccessControlledBackendHandler;
use lldap_auth::access_control::ValidationResults;
use lldap_domain::{public_schema::PublicSchema, types::AttributeName};
use lldap_domain::public_schema::PublicSchema;
use lldap_domain_handlers::handler::{BackendHandler, LoginHandler, ReadSchemaBackendHandler};
use lldap_opaque_handler::OpaqueHandler;
use tracing::{debug, instrument};
@@ -59,7 +59,7 @@ pub(crate) fn make_modify_response(code: LdapResultCode, message: String) -> Lda
pub struct LdapHandler<Backend> {
user_info: Option<ValidationResults>,
backend_handler: AccessControlledBackendHandler<Backend>,
ldap_info: LdapInfo,
ldap_info: &'static LdapInfo,
session_uuid: uuid::Uuid,
}
@@ -89,23 +89,13 @@ enum Credentials<'s> {
impl<Backend: BackendHandler + LoginHandler + OpaqueHandler> LdapHandler<Backend> {
pub fn new(
backend_handler: AccessControlledBackendHandler<Backend>,
mut ldap_base_dn: String,
ignored_user_attributes: Vec<AttributeName>,
ignored_group_attributes: Vec<AttributeName>,
ldap_info: &'static LdapInfo,
session_uuid: uuid::Uuid,
) -> Self {
ldap_base_dn.make_ascii_lowercase();
Self {
user_info: None,
backend_handler,
ldap_info: LdapInfo {
base_dn: parse_distinguished_name(&ldap_base_dn).unwrap_or_else(|_| {
panic!("Invalid value for ldap_base_dn in configuration: {ldap_base_dn}")
}),
base_dn_str: ldap_base_dn,
ignored_user_attributes,
ignored_group_attributes,
},
ldap_info,
session_uuid,
}
}
@@ -114,9 +104,9 @@ impl<Backend: BackendHandler + LoginHandler + OpaqueHandler> LdapHandler<Backend
pub fn new_for_tests(backend_handler: Backend, ldap_base_dn: &str) -> Self {
Self::new(
AccessControlledBackendHandler::new(backend_handler),
ldap_base_dn.to_string(),
vec![],
vec![],
Box::leak(Box::new(
LdapInfo::new(ldap_base_dn, Vec::new(), Vec::new()).unwrap(),
)),
uuid::Uuid::parse_str("550e8400-e29b-41d4-a716-446655440000").unwrap(),
)
}
@@ -171,13 +161,13 @@ impl<Backend: BackendHandler + LoginHandler + OpaqueHandler> LdapHandler<Backend
let backend_handler = self
.backend_handler
.get_user_restricted_lister_handler(user_info);
search::do_search(&backend_handler, &self.ldap_info, request).await
search::do_search(&backend_handler, self.ldap_info, request).await
}
#[instrument(skip_all, level = "debug", fields(dn = %request.dn))]
pub async fn do_bind(&mut self, request: &LdapBindRequest) -> Vec<LdapOp> {
let (code, message) =
match password::do_bind(&self.ldap_info, request, self.get_login_handler()).await {
match password::do_bind(self.ldap_info, request, self.get_login_handler()).await {
Ok(user_id) => {
self.user_info = self
.backend_handler
@@ -211,7 +201,7 @@ impl<Backend: BackendHandler + LoginHandler + OpaqueHandler> LdapHandler<Backend
};
do_password_modification(
credentials,
&self.ldap_info,
self.ldap_info,
&self.backend_handler,
self.get_opaque_handler(),
&password_request,
@@ -257,7 +247,7 @@ impl<Backend: BackendHandler + LoginHandler + OpaqueHandler> LdapHandler<Backend
self.backend_handler
.get_readable_handler(credentials, &user_id)
},
&self.ldap_info,
self.ldap_info,
credentials,
request,
)
@@ -275,7 +265,7 @@ impl<Backend: BackendHandler + LoginHandler + OpaqueHandler> LdapHandler<Backend
code: LdapResultCode::InsufficentAccessRights,
message: "Unauthorized write".to_string(),
})?;
create::create_user_or_group(backend_handler, &self.ldap_info, request).await
create::create_user_or_group(backend_handler, self.ldap_info, request).await
}
#[instrument(skip_all, level = "debug")]
@@ -288,7 +278,7 @@ impl<Backend: BackendHandler + LoginHandler + OpaqueHandler> LdapHandler<Backend
code: LdapResultCode::InsufficentAccessRights,
message: "Unauthorized write".to_string(),
})?;
delete::delete_user_or_group(backend_handler, &self.ldap_info, request).await
delete::delete_user_or_group(backend_handler, self.ldap_info, request).await
}
#[instrument(skip_all, level = "debug")]
@@ -398,6 +388,7 @@ pub mod tests {
creation_date: chrono::Utc.timestamp_opt(42, 42).unwrap().naive_utc(),
uuid: uuid!("a1a2a3a4b1b2c1c2d1d2d3d4d5d6d7d8"),
attributes: Vec::new(),
modified_date: chrono::Utc.timestamp_opt(42, 42).unwrap().naive_utc(),
});
Ok(set)
});
+1 -1
View File
@@ -7,7 +7,7 @@ pub(crate) mod modify;
pub(crate) mod password;
pub(crate) mod search;
pub use core::utils::{UserFieldType, map_group_field, map_user_field};
pub use core::utils::{LdapInfo, UserFieldType, map_group_field, map_user_field};
pub use handler::LdapHandler;
pub use core::group::get_default_group_object_classes;
+1
View File
@@ -158,6 +158,7 @@ mod tests {
creation_date: chrono::Utc.timestamp_opt(42, 42).unwrap().naive_utc(),
uuid: uuid!("a1a2a3a4b1b2c1c2d1d2d3d4d5d6d7d8"),
attributes: Vec::new(),
modified_date: chrono::Utc.timestamp_opt(42, 42).unwrap().naive_utc(),
});
}
Ok(g)
+2
View File
@@ -263,6 +263,7 @@ pub mod tests {
creation_date: chrono::Utc.timestamp_opt(42, 42).unwrap().naive_utc(),
uuid: uuid!("a1a2a3a4b1b2c1c2d1d2d3d4d5d6d7d8"),
attributes: Vec::new(),
modified_date: chrono::Utc.timestamp_opt(42, 42).unwrap().naive_utc(),
});
Ok(set)
});
@@ -520,6 +521,7 @@ pub mod tests {
creation_date: chrono::Utc.timestamp_opt(42, 42).unwrap().naive_utc(),
uuid: uuid!("a1a2a3a4b1b2c1c2d1d2d3d4d5d6d7d8"),
attributes: Vec::new(),
modified_date: chrono::Utc.timestamp_opt(42, 42).unwrap().naive_utc(),
});
mock.expect_get_user_groups()
.with(eq(UserId::new("bob")))
+154 -745
View File
File diff suppressed because it is too large Load Diff
+1
View File
@@ -7,6 +7,7 @@ edition.workspace = true
homepage.workspace = true
license.workspace = true
repository.workspace = true
rust-version.workspace = true
[features]
test = []
+1
View File
@@ -7,6 +7,7 @@ edition.workspace = true
homepage.workspace = true
license.workspace = true
repository.workspace = true
rust-version.workspace = true
[features]
test = []
@@ -206,6 +206,7 @@ impl GroupBackendHandler for SqlBackendHandler {
lowercase_display_name: Set(lower_display_name),
creation_date: Set(now),
uuid: Set(uuid),
modified_date: Set(now),
..Default::default()
};
Ok(self
@@ -268,10 +269,12 @@ impl SqlBackendHandler {
.display_name
.as_ref()
.map(|s| s.as_str().to_lowercase());
let now = chrono::Utc::now().naive_utc();
let update_group = model::groups::ActiveModel {
group_id: Set(request.group_id),
display_name: request.display_name.map(Set).unwrap_or_default(),
lowercase_display_name: lower_display_name.map(Set).unwrap_or_default(),
modified_date: Set(now),
..Default::default()
};
update_group.update(transaction).await?;
@@ -27,6 +27,8 @@ pub enum Users {
TotpSecret,
MfaType,
Uuid,
ModifiedDate,
PasswordModifiedDate,
}
#[derive(DeriveIden, PartialEq, Eq, Debug, Serialize, Deserialize, Clone, Copy)]
@@ -37,6 +39,7 @@ pub(crate) enum Groups {
LowercaseDisplayName,
CreationDate,
Uuid,
ModifiedDate,
}
#[derive(DeriveIden, Clone, Copy)]
@@ -1112,6 +1115,53 @@ async fn migrate_to_v10(transaction: DatabaseTransaction) -> Result<DatabaseTran
Ok(transaction)
}
/// Migration to schema v11: adds modification-tracking columns.
///
/// Adds `modified_date` and `password_modified_date` to the `users` table and
/// `modified_date` to the `groups` table. Existing rows are backfilled through
/// the column default, which is the time the migration runs.
async fn migrate_to_v11(transaction: DatabaseTransaction) -> Result<DatabaseTransaction, DbErr> {
    let builder = transaction.get_database_backend();
    // Capture a single timestamp so all three columns share one consistent
    // backfill value instead of three slightly different clock reads.
    let now = chrono::Utc::now().naive_utc();
    // Add modified_date to users table
    transaction
        .execute(
            builder.build(
                Table::alter().table(Users::Table).add_column(
                    ColumnDef::new(Users::ModifiedDate)
                        .date_time()
                        .not_null()
                        .default(now),
                ),
            ),
        )
        .await?;
    // Add password_modified_date to users table
    transaction
        .execute(
            builder.build(
                Table::alter().table(Users::Table).add_column(
                    ColumnDef::new(Users::PasswordModifiedDate)
                        .date_time()
                        .not_null()
                        .default(now),
                ),
            ),
        )
        .await?;
    // Add modified_date to groups table
    transaction
        .execute(
            builder.build(
                Table::alter().table(Groups::Table).add_column(
                    ColumnDef::new(Groups::ModifiedDate)
                        .date_time()
                        .not_null()
                        .default(now),
                ),
            ),
        )
        .await?;
    Ok(transaction)
}
// This is needed to make an array of async functions.
macro_rules! to_sync {
($l:ident) => {
@@ -1142,6 +1192,7 @@ pub(crate) async fn migrate_from_version(
to_sync!(migrate_to_v8),
to_sync!(migrate_to_v9),
to_sync!(migrate_to_v10),
to_sync!(migrate_to_v11),
];
assert_eq!(migrations.len(), (LAST_SCHEMA_VERSION.0 - 1) as usize);
for migration in 2..=last_version.0 {
@@ -197,9 +197,12 @@ impl OpaqueHandler for SqlOpaqueHandler {
let password_file =
opaque::server::registration::get_password_file(request.registration_upload);
// Set the user password to the new password.
let now = chrono::Utc::now().naive_utc();
let user_update = model::users::ActiveModel {
user_id: ActiveValue::Set(username.clone()),
password_hash: ActiveValue::Set(Some(password_file.serialize())),
password_modified_date: ActiveValue::Set(now),
modified_date: ActiveValue::Set(now),
..Default::default()
};
user_update.update(&self.sql_pool).await?;
+1 -1
View File
@@ -9,7 +9,7 @@ pub type DbConnection = sea_orm::DatabaseConnection;
#[derive(Copy, PartialEq, Eq, Debug, Clone, PartialOrd, Ord, DeriveValueType)]
pub struct SchemaVersion(pub i16);
pub const LAST_SCHEMA_VERSION: SchemaVersion = SchemaVersion(10);
pub const LAST_SCHEMA_VERSION: SchemaVersion = SchemaVersion(11);
#[derive(Copy, PartialEq, Eq, Debug, Clone, PartialOrd, Ord)]
pub struct PrivateKeyHash(pub [u8; 32]);
@@ -2,7 +2,11 @@ use crate::sql_backend_handler::SqlBackendHandler;
use async_trait::async_trait;
use lldap_domain::{
requests::{CreateUserRequest, UpdateUserRequest},
types::{AttributeName, GroupDetails, GroupId, Serialized, User, UserAndGroups, UserId, Uuid},
schema::Schema,
types::{
Attribute, AttributeName, GroupDetails, GroupId, Serialized, User, UserAndGroups, UserId,
Uuid,
},
};
use lldap_domain_handlers::handler::{
ReadSchemaBackendHandler, UserBackendHandler, UserListerBackendHandler, UserRequestFilter,
@@ -185,18 +189,12 @@ impl UserListerBackendHandler for SqlBackendHandler {
}
impl SqlBackendHandler {
async fn update_user_with_transaction(
transaction: &DatabaseTransaction,
request: UpdateUserRequest,
) -> Result<()> {
let lower_email = request.email.as_ref().map(|s| s.as_str().to_lowercase());
let update_user = model::users::ActiveModel {
user_id: ActiveValue::Set(request.user_id.clone()),
email: request.email.map(ActiveValue::Set).unwrap_or_default(),
lowercase_email: lower_email.map(ActiveValue::Set).unwrap_or_default(),
display_name: to_value(&request.display_name),
..Default::default()
};
fn compute_user_attribute_changes(
user_id: &UserId,
insert_attributes: Vec<Attribute>,
delete_attributes: Vec<AttributeName>,
schema: &Schema,
) -> Result<(Vec<model::user_attributes::ActiveModel>, Vec<AttributeName>)> {
let mut update_user_attributes = Vec::new();
let mut remove_user_attributes = Vec::new();
let mut process_serialized =
@@ -206,24 +204,20 @@ impl SqlBackendHandler {
}
ActiveValue::Set(_) => {
update_user_attributes.push(model::user_attributes::ActiveModel {
user_id: Set(request.user_id.clone()),
user_id: Set(user_id.clone()),
attribute_name: Set(attribute_name),
value,
})
}
_ => unreachable!(),
};
let schema = Self::get_schema_with_transaction(transaction).await?;
for attribute in request.insert_attributes {
for attribute in insert_attributes {
if schema
.user_attributes
.get_attribute_type(&attribute.name)
.is_some()
{
process_serialized(
ActiveValue::Set(attribute.value.into()),
attribute.name.clone(),
);
process_serialized(ActiveValue::Set(attribute.value.into()), attribute.name);
} else {
return Err(DomainError::InternalError(format!(
"User attribute name {} doesn't exist in the schema, yet was attempted to be inserted in the database",
@@ -231,7 +225,7 @@ impl SqlBackendHandler {
)));
}
}
for attribute in request.delete_attributes {
for attribute in delete_attributes {
if schema
.user_attributes
.get_attribute_type(&attribute)
@@ -244,6 +238,31 @@ impl SqlBackendHandler {
)));
}
}
Ok((update_user_attributes, remove_user_attributes))
}
async fn update_user_with_transaction(
transaction: &DatabaseTransaction,
request: UpdateUserRequest,
) -> Result<()> {
let schema = Self::get_schema_with_transaction(transaction).await?;
let (update_user_attributes, remove_user_attributes) =
Self::compute_user_attribute_changes(
&request.user_id,
request.insert_attributes,
request.delete_attributes,
&schema,
)?;
let lower_email = request.email.as_ref().map(|s| s.as_str().to_lowercase());
let now = chrono::Utc::now().naive_utc();
let update_user = model::users::ActiveModel {
user_id: ActiveValue::Set(request.user_id.clone()),
email: request.email.map(ActiveValue::Set).unwrap_or_default(),
lowercase_email: lower_email.map(ActiveValue::Set).unwrap_or_default(),
display_name: to_value(&request.display_name),
modified_date: ActiveValue::Set(now),
..Default::default()
};
update_user.update(transaction).await?;
if !remove_user_attributes.is_empty() {
model::UserAttributes::delete_many()
@@ -325,6 +344,8 @@ impl UserBackendHandler for SqlBackendHandler {
display_name: to_value(&request.display_name),
creation_date: ActiveValue::Set(now),
uuid: ActiveValue::Set(uuid),
modified_date: ActiveValue::Set(now),
password_modified_date: ActiveValue::Set(now),
..Default::default()
};
let mut new_user_attributes = Vec::new();
@@ -391,24 +412,70 @@ impl UserBackendHandler for SqlBackendHandler {
#[instrument(skip_all, level = "debug", err, fields(user_id = ?user_id.as_str(), group_id))]
async fn add_user_to_group(&self, user_id: &UserId, group_id: GroupId) -> Result<()> {
let new_membership = model::memberships::ActiveModel {
user_id: ActiveValue::Set(user_id.clone()),
group_id: ActiveValue::Set(group_id),
};
new_membership.insert(&self.sql_pool).await?;
let user_id = user_id.clone();
self.sql_pool
.transaction::<_, _, sea_orm::DbErr>(|transaction| {
Box::pin(async move {
let new_membership = model::memberships::ActiveModel {
user_id: ActiveValue::Set(user_id),
group_id: ActiveValue::Set(group_id),
};
new_membership.insert(transaction).await?;
// Update group modification time
let now = chrono::Utc::now().naive_utc();
let update_group = model::groups::ActiveModel {
group_id: Set(group_id),
modified_date: Set(now),
..Default::default()
};
update_group.update(transaction).await?;
Ok(())
})
})
.await?;
Ok(())
}
#[instrument(skip_all, level = "debug", err, fields(user_id = ?user_id.as_str(), group_id))]
async fn remove_user_from_group(&self, user_id: &UserId, group_id: GroupId) -> Result<()> {
let res = model::Membership::delete_by_id((user_id.clone(), group_id))
.exec(&self.sql_pool)
.await?;
if res.rows_affected == 0 {
return Err(DomainError::EntityNotFound(format!(
"No such membership: '{user_id}' -> {group_id:?}"
)));
}
let user_id = user_id.clone();
self.sql_pool
.transaction::<_, _, sea_orm::DbErr>(|transaction| {
Box::pin(async move {
let res = model::Membership::delete_by_id((user_id.clone(), group_id))
.exec(transaction)
.await?;
if res.rows_affected == 0 {
return Err(sea_orm::DbErr::Custom(format!(
"No such membership: '{user_id}' -> {group_id:?}"
)));
}
// Update group modification time
let now = chrono::Utc::now().naive_utc();
let update_group = model::groups::ActiveModel {
group_id: Set(group_id),
modified_date: Set(now),
..Default::default()
};
update_group.update(transaction).await?;
Ok(())
})
})
.await
.map_err(|e| match e {
sea_orm::TransactionError::Connection(sea_orm::DbErr::Custom(msg)) => {
DomainError::EntityNotFound(msg)
}
sea_orm::TransactionError::Transaction(sea_orm::DbErr::Custom(msg)) => {
DomainError::EntityNotFound(msg)
}
sea_orm::TransactionError::Connection(e) => DomainError::DatabaseError(e),
sea_orm::TransactionError::Transaction(e) => DomainError::DatabaseError(e),
})?;
Ok(())
}
}
+1
View File
@@ -6,6 +6,7 @@ edition.workspace = true
homepage.workspace = true
license.workspace = true
repository.workspace = true
rust-version.workspace = true
[dependencies]
async-trait = "0.1"
+1
View File
@@ -7,3 +7,4 @@ edition.workspace = true
homepage.workspace = true
license.workspace = true
repository.workspace = true
rust-version.workspace = true
+27 -5
View File
@@ -3,6 +3,7 @@
- [With Docker](#with-docker)
- [With Podman](#with-podman)
- [With Kubernetes](#with-kubernetes)
- [TrueNAS SCALE](#truenas-scale)
- [From a package repository](#from-a-package-repository)
- [With FreeBSD](#with-freebsd)
- [From source](#from-source)
@@ -68,7 +69,7 @@ services:
- LLDAP_JWT_SECRET=REPLACE_WITH_RANDOM
- LLDAP_KEY_SEED=REPLACE_WITH_RANDOM
- LLDAP_LDAP_BASE_DN=dc=example,dc=com
- LLDAP_LDAP_USER_PASS=adminPas$word
- LLDAP_LDAP_USER_PASS=CHANGE_ME # If the password contains '$', escape it (e.g. Pas$$word sets Pas$word)
# If using LDAPS, set enabled true and configure cert and key path
# - LLDAP_LDAPS_OPTIONS__ENABLED=true
# - LLDAP_LDAPS_OPTIONS__CERT_FILE=/path/to/certfile.crt
@@ -93,7 +94,7 @@ front-end.
### With Podman
LLDAP works well with rootless Podman either through command line deployment
or using [quadlets](example_configs/podman-quadlets/). The example quadlets
or using [quadlets](../example_configs/podman-quadlets/). The example quadlets
include configuration with postgresql and file based secrets, but have comments
for several other deployment strategies.
@@ -102,9 +103,30 @@ for several other deployment strategies.
See https://github.com/Evantage-WS/lldap-kubernetes for a LLDAP deployment for Kubernetes
You can bootstrap your lldap instance (users, groups)
using [bootstrap.sh](example_configs/bootstrap/bootstrap.md#kubernetes-job).
using [bootstrap.sh](../example_configs/bootstrap/bootstrap.md#kubernetes-job).
It can be run by Argo CD for managing users in git-opt way, or as a one-shot job.
### TrueNAS SCALE
LLDAP can be installed on **TrueNAS SCALE** using the built-in Apps catalog, allowing users to deploy and manage LLDAP directly from the TrueNAS web interface without manually maintaining containers.
To install:
1. Open the TrueNAS web interface.
2. Navigate to **Apps → Discover Apps**.
3. Search for **LLDAP** and click **Install**.
4. Provide the required configuration values such as:
- Base DN
- Admin credentials
- LDAP / LDAPS ports
- Persistent storage dataset
TrueNAS supports selecting certificates for LDAPS and configuring a public web URL. When LDAPS is enabled, it is recommended to disable the unencrypted LDAP port to ensure secure communication.
A full, step-by-step TrueNAS-specific guide (including recommended ports, certificate configuration, and common integrations) is available here:
👉 [example_configs/truenas-install.md](https://github.com/lldap/lldap/blob/main/example_configs/truenas-install.md)
### From a package repository
**Do not open issues in this repository for problems with third-party
@@ -114,7 +136,7 @@ Depending on the distribution you use, it might be possible to install LLDAP
from a package repository, officially supported by the distribution or
community contributed.
Each package offers a [systemd service](https://wiki.archlinux.org/title/systemd#Using_units) `lldap.service` or [rc.d_lldap](example_configs/freebsd/rc.d_lldap) `rc.d/lldap` to (auto-)start and stop lldap.<br>
Each package offers a [systemd service](https://wiki.archlinux.org/title/systemd#Using_units) `lldap.service` or [rc.d_lldap](../example_configs/freebsd/rc.d_lldap) `rc.d/lldap` to (auto-)start and stop lldap.<br>
When using the distributed packages, the default login is `admin/password`. You can change that from the web UI after starting the service.
<details>
@@ -385,7 +407,7 @@ arguments to `cargo run`. Have a look at the docker template:
`lldap_config.docker_template.toml`.
You can also install it as a systemd service, see
[lldap.service](example_configs/lldap.service).
[lldap.service](../example_configs/lldap.service).
### Cross-compilation
+71
View File
@@ -0,0 +1,71 @@
# Nix Development Environment
LLDAP provides a Nix flake that sets up a complete development environment with all necessary tools and dependencies.
## Requirements
- [Nix](https://nixos.org/download.html) with flakes enabled
- (Optional) [direnv](https://direnv.net/) for automatic environment activation
## Usage
```bash
# Clone the repository
git clone https://github.com/lldap/lldap.git
cd lldap
# Enter the development environment
nix develop
# Build the workspace
cargo build --workspace
# Run tests
cargo test --workspace
# Check formatting and linting
cargo fmt --check --all
cargo clippy --tests --workspace -- -D warnings
# Build frontend
./app/build.sh
# Export GraphQL schema (if needed)
./export_schema.sh
# Start development server
cargo run -- run --config-file lldap_config.docker_template.toml
```
## Building with Nix
You can also build LLDAP directly using Nix:
```bash
# Build the default package (server)
nix build
# Build and run
nix run
```
## Development Shells
The flake provides two development shells:
- `default` - Full development environment
- `ci` - Minimal environment similar to CI
```bash
# Use the CI-like environment
nix develop .#ci
```
## Automatic Environment Activation (Optional)
For automatic environment activation when entering the project directory:
1. Install direnv: `nix profile install nixpkgs#direnv`
2. Set up direnv shell hook in your shell configuration
3. Navigate to the project directory and allow direnv: `direnv allow`
4. The environment will automatically activate when entering the directory
+5
View File
@@ -4,6 +4,7 @@ Some specific clients have been tested to work and come with sample
configuration files:
- [Airsonic Advanced](airsonic-advanced.md)
- [Apache HTTP Server](apache.md)
- [Apache Guacamole](apacheguacamole.md)
- [Apereo CAS Server](apereo_cas_server.md)
- [Authelia](authelia.md)
@@ -11,6 +12,7 @@ configuration files:
- [Bookstack](bookstack.env.example)
- [Calibre-Web](calibre_web.md)
- [Carpal](carpal.md)
- [Continuwuity](continuwuity.md)
- [Dell iDRAC](dell_idrac.md)
- [Dex](dex_config.yml)
- [Dokuwiki](dokuwiki.md)
@@ -19,6 +21,7 @@ configuration files:
- [Ejabberd](ejabberd.md)
- [Emby](emby.md)
- [Ergo IRCd](ergo.md)
- [Gerrit](gerrit.md)
- [Gitea](gitea.md)
- [GitLab](gitlab.md)
- [Grafana](grafana_ldap_config.toml)
@@ -51,6 +54,7 @@ configuration files:
- [Peertube](peertube.md)
- [Penpot](penpot.md)
- [pgAdmin](pgadmin.md)
- [Pocket-ID](pocket-id.md)
- [Portainer](portainer.md)
- [PowerDNS Admin](powerdns_admin.md)
- [Prosody](prosody.md)
@@ -59,6 +63,7 @@ configuration files:
- [Radicale](radicale.md)
- [Rancher](rancher.md)
- [Seafile](seafile.md)
- [Semaphore](semaphore.md)
- [Shaarli](shaarli.md)
- [Snipe-IT](snipe-it.md)
- [SonarQube](sonarqube.md)
+65
View File
@@ -0,0 +1,65 @@
# Configuration for Apache
This example snippet gives each user space under `/webdav/<username>/` when they log in as that user.
## Apache LDAP Configuration
```
# The User/Group specified in httpd.conf needs to have write permissions
# on the directory where the DavLockDB is placed and on any directory where
# "Dav On" is specified.
DavLockDB "/var/local/apache2/DavLock"
Alias /webdav "/var/local/apache2/data"
<Directory "/var/local/apache2/data">
AllowOverride None
Require all denied
DirectoryIndex disabled
</Directory>
<DirectoryMatch "^/var/local/apache2/data/(?<user>[^/]+)">
AuthType Basic
AuthName "LDAP Credentials"
AuthBasicProvider ldap
AuthLDAPURL ldap://lldap:3890/ou=people,dc=example,dc=com?uid?sub?(objectClass=person)
AuthLDAPBindDN uid=integration,ou=people,dc=example,dc=com
AuthLDAPBindPassword [redacted]
<RequireAll>
Require ldap-user "%{env:MATCH_USER}"
Require ldap-group cn=WebDAV,ou=groups,dc=example,dc=com
</RequireAll>
Dav On
Options +Indexes
</DirectoryMatch>
```
### Notes
* Make sure you create the `data` directory, and the subdirectories for your users.
* `integration` was an LDAP user I added with strict readonly.
* The `WebDAV` group was something I added and put relevant users into, more as a test of functionality than out of any need.
* I left the comment from the Apache DAV config in because it's not kidding around and it won't be obvious what's going wrong from the Apache logs if you miss that.
## Apache Orchestration
The stock Apache server with that stanza added to the bottom of the stock config and shared into the container.
```
webdav:
image: httpd:2.4.66-trixie
restart: always
volumes:
- /opt/webdav:/var/local/apache2
- ./httpd.conf:/usr/local/apache2/conf/httpd.conf
labels:
- "traefik.enable=true"
- "traefik.http.routers.webdav.entrypoints=websecure"
- "traefik.http.routers.webdav.rule=Host(`redacted`) && PathPrefix(`/webdav`)"
- "traefik.http.routers.webdav.tls.certresolver=myresolver"
- "traefik.http.routers.webdav.service=webdav-service"
- "traefik.http.services.webdav-service.loadbalancer.server.port=80"
```
+11 -1
View File
@@ -64,7 +64,7 @@ dc=example,dc=com
# Additional settings
## Group
## Parent Group
```
---------
```
@@ -99,6 +99,16 @@ ou=groups
member
```
## User membership attribute
```
distinguishedName
```
## Looking using user attribute
```
false
```
## Object uniqueness field
```
uid
+5 -27
View File
@@ -72,6 +72,7 @@ Fields description:
* `id`: it's just username (**MANDATORY**)
* `email`: self-explanatory (**MANDATORY**)
* `password`: would be used to set the password using `lldap_set_password` utility
* `password_file`: path to a file containing the password otherwise same as above
* `displayName`: self-explanatory
* `firstName`: self-explanatory
* `lastName`: self-explanatory
@@ -130,7 +131,7 @@ Fields description:
"isVisible": true
},
{
"name": "mail_alias",
"name": "mail-alias",
"attributeType": "STRING",
"isEditable": false,
"isList": true,
@@ -246,14 +247,14 @@ spec:
restartPolicy: OnFailure
containers:
- name: lldap-bootstrap
image: lldap/lldap:v0.5.0
image: lldap/lldap:latest
command:
- /bootstrap/bootstrap.sh
- /app/bootstrap.sh
env:
- name: LLDAP_URL
value: "http://lldap:8080"
value: "http://lldap:17170"
- name: LLDAP_ADMIN_USERNAME
valueFrom: { secretKeyRef: { name: lldap-admin-user, key: username } }
@@ -265,11 +266,6 @@ spec:
value: "true"
volumeMounts:
- name: bootstrap
mountPath: /bootstrap/bootstrap.sh
readOnly: true
subPath: bootstrap.sh
- name: user-configs
mountPath: /bootstrap/user-configs
readOnly: true
@@ -279,27 +275,9 @@ spec:
readOnly: true
volumes:
- name: bootstrap
configMap:
name: bootstrap
defaultMode: 0555
items:
- key: bootstrap.sh
path: bootstrap.sh
- name: user-configs
projected:
sources:
- secret:
name: lldap-admin-user
items:
- key: user-config.json
path: admin-config.json
- secret:
name: lldap-password-manager-user
items:
- key: user-config.json
path: password-manager-config.json
- secret:
name: lldap-bootstrap-configs
items:
+15
View File
@@ -0,0 +1,15 @@
# Configuration for Continuwuity
This example uses environment vars from my docker-compose.yml; it also works just as well with a [config file](https://continuwuity.org/reference/config). `uid=query,ou=people,dc=example,dc=com` is a read-only user and you need to put their password into `/etc/bind_password_file`. Users need to be in the group `matrix` to log in, and users in the group `matrix-admin` will be admins.
```
CONTINUWUITY_LDAP__ENABLE: 'true'
CONTINUWUITY_LDAP__LDAP_ONLY: 'true'
CONTINUWUITY_LDAP__URI: 'ldap://lldap.example.com:3890'
CONTINUWUITY_LDAP__BASE_DN: 'ou=people,dc=example,dc=com'
CONTINUWUITY_LDAP__BIND_DN: 'uid=query,ou=people,dc=example,dc=com'
CONTINUWUITY_LDAP__BIND_PASSWORD_FILE: '/etc/bind_password_file'
CONTINUWUITY_LDAP__FILTER: '(memberOf=matrix)'
CONTINUWUITY_LDAP__UID_ATTRIBUTE: 'uid'
CONTINUWUITY_LDAP__ADMIN_FILTER: '(memberOf=matrix-admin)'
```
+18
View File
@@ -0,0 +1,18 @@
# Configuration for Gerrit
Edit `gerrit.config`:
```ini
[auth]
type = ldap
[ldap]
server = ldap://lldap:3890
supportAnonymous = false
username = uid=gerritadmin,ou=people,dc=example,dc=com
accountBase = ou=people,dc=example,dc=com
accountPattern = (uid=${username})
accountFullName = cn
accountEmailAddress = mail
```
The setting `supportAnonymous = false` is required.
+46
View File
@@ -0,0 +1,46 @@
# Gogs LDAP configuration
Gogs can make use of LDAP and therefore lldap.
The following configuration is adapted from the example configuration at [their repository](https://github.com/gogs/gogs/blob/main/conf/auth.d/ldap_bind_dn.conf.example).
The example is a container configuration - the file should live within `conf/auth.d/some_name.conf`:
```yaml
$ cat /srv/git/gogs/conf/auth.d/ldap_bind_dn.conf
id = 101
type = ldap_bind_dn
name = LDAP BindDN
is_activated = true
is_default = true
[config]
host = ldap.example.com
port = 6360
# 0 - Unencrypted, 1 - LDAPS, 2 - StartTLS
security_protocol = 1
# You either need to install the LDAPS certificate into your trust store -
# Or skip verification altogether - for a restricted container deployment a sane default.
skip_verify = true
bind_dn = uid=<binduser>,ou=people,dc=example,dc=com
bind_password = `yourPasswordInBackticks`
user_base = dc=example,dc=com
attribute_username = uid
attribute_name = givenName
attribute_surname = sn
attribute_mail = mail
attributes_in_bind = false
# restricts on the `user_base`.
filter = (&(objectClass=person)(uid=%s))
# The initial administrator has to enable admin privileges.
# This is only possible for users who were logged in once.
# This renders the following filter obsolete; Though its response is accepted by Gogs.
admin_filter = (memberOf=cn=<yourAdminGroup>,ou=groups,dc=example,dc=com)
```
The `binduser` shall be a member of `lldap_strict_readonly`.
The group `yourAdminGroup` should be adapted to your requirement - Otherwise the entire line can be omitted.
The angle brackets are for readability and are not required.
## Tested on Gogs
v0.14+dev via podman 4.3.1
+8 -1
View File
@@ -41,7 +41,14 @@ name = "displayName"
surname = "sn"
username = "uid"
# If you want to map your ldap groups to grafana's groups, see: https://grafana.com/docs/grafana/latest/auth/ldap/#group-mappings
# If you want to map your ldap groups to grafana's groups, configure the group query:
# https://grafana.com/docs/grafana/latest/setup-grafana/configure-access/configure-authentication/ldap/#posix-schema
# group_search_filter = "(&(objectClass=groupOfUniqueNames)(uniqueMember=%s))"
# group_search_base_dns = ["ou=groups,dc=example,dc=com"]
# group_search_filter_user_attribute = "uid"
#
# Then configure the groups:
# https://grafana.com/docs/grafana/latest/setup-grafana/configure-access/configure-authentication/ldap/#group-mappings
# As a quick example, here is how you would map lldap's admin group to grafana's admin
# [[servers.group_mappings]]
# group_dn = "cn=lldap_admin,ou=groups,dc=example,dc=org"
+2 -2
View File
@@ -64,7 +64,7 @@ if [[ ! -z "$2" ]] && ! jq -e '.groups|map(.displayName)|index("'"$2"'")' <<< $U
exit 1
fi
DISPLAY_NAME=$(jq -r .displayName <<< $USER_JSON)
DISPLAY_NAME=$(jq -r '.displayName // .id' <<< $USER_JSON)
IS_ADMIN=false
if [[ ! -z "$3" ]] && jq -e '.groups|map(.displayName)|index("'"$3"'")' <<< "$USER_JSON" > /dev/null 2>&1; then
@@ -88,4 +88,4 @@ if [[ "$IS_LOCAL" = true ]]; then
echo "local_only = true"
else
echo "local_only = false"
fi
fi
+8 -4
View File
@@ -58,9 +58,9 @@ services:
- LDAP_SEARCH_BASE=ou=people,dc=example,dc=com
- LDAP_BIND_DN=uid=admin,ou=people,dc=example,dc=com
- LDAP_BIND_PW=adminpassword
- LDAP_QUERY_FILTER_USER=(&(objectClass=inetOrgPerson)(|(uid=%u)(mail=%u)))
- LDAP_QUERY_FILTER_USER=(&(objectClass=inetOrgPerson)(mail=%s))
- LDAP_QUERY_FILTER_GROUP=(&(objectClass=groupOfUniqueNames)(uid=%s))
- LDAP_QUERY_FILTER_ALIAS=(&(objectClass=inetOrgPerson)(|(uid=%u)(mail=%u)))
- LDAP_QUERY_FILTER_ALIAS=(&(objectClass=inetOrgPerson)(mail=%s))
- LDAP_QUERY_FILTER_DOMAIN=(mail=*@%s)
# <<< Postfix LDAP Integration
# >>> Dovecot LDAP Integration
@@ -78,7 +78,8 @@ services:
container_name: roundcubemail
restart: always
volumes:
- roundcube_data:/var/www/html
- roundcube_config:/var/roundcube/config
- roundcube_plugins:/var/www/html/plugins
ports:
- "9002:80"
environment:
@@ -86,12 +87,15 @@ services:
- ROUNDCUBEMAIL_SKIN=elastic
- ROUNDCUBEMAIL_DEFAULT_HOST=mailserver # IMAP
- ROUNDCUBEMAIL_SMTP_SERVER=mailserver # SMTP
- ROUNDCUBEMAIL_COMPOSER_PLUGINS=roundcube/carddav
- ROUNDCUBEMAIL_PLUGINS=carddav
volumes:
mailserver-data:
mailserver-config:
mailserver-state:
lldap_data:
roundcube_data:
roundcube_config:
roundcube_plugins:
```
+31
View File
@@ -0,0 +1,31 @@
# Open-WebUI LDAP configuration
For the GUI settings (recommended) go to:
`Admin Panel > General`.
There you find the LDAP config.
For the initial activation, restart OpenWebUI to load the LDAP module.
The following configurations have to be provided.
The user `binduser` has to be member of `lldap_strict_readonly`.
| environment variable | GUI variable | example value | elaboration |
|----------------------|--------------|---------------|-------------|
| `ENABLE_LDAP` | LDAP | `true` | Toggle |
| `LDAP_SERVER_LABEL` | Label | `any` (lldap) | name |
| `LDAP_SERVER_HOST` | Host | `ldap.example.org` | IP/domain without scheme or port |
| `LDAP_SERVER_PORT` | Port | `6360` | When starting Open-WebUI sometimes it only accepts the default LDAP or LDAPS port (only ENV configuration) |
| `LDAP_ATTRIBUTE_FOR_MAIL` | Attribute for Mail | `mail` | default |
| `LDAP_ATTRIBUTE_FOR_USERNAME` | Attribute for Username | `uid` | default |
| `LDAP_APP_DN` | Application DN | `uid=binduser,ou=people,dc=example,dc=org` | Hovering shows: Bind user-dn |
| `LDAP_APP_PASSWORD` | Application DN Password | `<binduser-pw>` | - |
| `LDAP_SEARCH_BASE` | Search Base | `ou=people,dc=example,dc=org` | Who should get access from your instance. |
| `LDAP_SEARCH_FILTER` | Search Filter | `(objectClass=person)` or `(\|(objectClass=person)(memberOf=cn=webui-members,ou=groups,dc=example,dc=org))` | Query for Open WebUI account names. |
| `LDAP_USE_TLS` | TLS | `true` | Should be `true` for LDAPS, `false` for plain LDAP |
| `LDAP_CA_CERT_FILE` | Certificate Path | `/ca-chain.pem` | required when TLS activated |
| `LDAP_VALIDATE_CERT` | Validate Certificate | `true` | Set to `false` for self-signed certificates |
| `LDAP_CIPHERS` | Ciphers | ALL | default |
## Tested on Open WebUI
v0.6.26 via podman 5.4.2
+3
View File
@@ -92,6 +92,9 @@ Enable the following options on the OPNsense configuration page for your LLDAP s
- Synchronize groups: `Checked`
- Automatic user creation: `Checked`
### Constraint Groups
This limits the groups to prevent injection attacks. If you want to enable this feature, you need to add `ou=groups,dc=example,dc=com` to the Authentication Containers field. Be sure to separate entries with a semicolon. Otherwise, disable this option.
### Create OPNsense Group
Go to `System > Access > Groups` and create a new group with the **same** name as the LLDAP group used to authenticate users for OPNsense.
+146 -47
View File
@@ -1,40 +1,55 @@
# Getting Started with UNIX PAM using SSSD
This guide was tested with LDAPS on debian 12 with SSSD 2.8.2 and certificates signed by a registered CA.
## Configuring LLDAP
### Configure LDAPS
You **must** use LDAPS. You MUST NOT use plain LDAP. Even over a private network this costs you nearly nothing, and passwords will be sent in PLAIN TEXT without it.
Even in private networks you **should** configure LLDAP to communicate over HTTPS, otherwise passwords will be
transmitted in plain text. Just using a self-signed certificate will drastically improve security.
```jsx
You can generate an SSL certificate for LLDAP with the following command. The `subjectAltName` is **required**. Make
sure all domains are listed there, even your `CN`.
```bash
openssl req -x509 -nodes -newkey rsa:4096 -keyout key.pem -out cert.pem -sha256 -days 36500 -subj "/CN=ldap.example.com" -addext "subjectAltName = DNS:ldap.example.com"
```
With the generated certificates for your domain, copy the certificates and enable ldaps in the LLDAP configuration.
```
[ldaps_options]
enabled=true
port=6360
port=636
cert_file="cert.pem"
key_file="key.pem"
```
You can generate an SSL certificate for it with the following command. The `subjectAltName` is REQUIRED. Make sure all domains are listed there, even your `CN`.
### Setting up custom attributes
```bash
openssl req -x509 -nodes -newkey rsa:4096 -keyout key.pem -out cert.pem -sha256 -days 36500 -nodes -subj "/CN=lldap.example.net" -addext "subjectAltName = DNS:lldap.example.net"
```
SSSD makes use of the `posixAccount` and `sshPublicKey` object types, their attributes have to be created manually in
LLDAP.
### Setting up the custom attributes
Add the following custom attributes to the **User schema**.
You will need to add the following custom attributes to the **user schema**.
| Attribute | Type | Multiple | Example |
|---------------|---------|:--------:|------------|
| uidNumber | integer | | 3000 |
| gidNumber | integer | | 3000 |
| homeDirectory | string | | /home/user |
| unixShell | string | | /bin/bash |
| sshPublicKey | string | X | *sshKey* |
- uidNumber (integer)
- gidNumber (integer, multiple values)
- homeDirectory (string)
- unixShell (string)
- sshPublicKey (string) (only if youre setting up SSH Public Key Sync)
Add the following custom attributes to the **Group schema.**
You will need to add the following custom attributes to the **group schema.**
| Attribute | Type | Multiple | Example |
|---------------|---------|:--------:|------------|
| gidNumber | integer | | 3000 |
- gidNumber (integer)
You will now need to populate these values for all the users you wish to be able to login.
The only optional attributes are `unixShell` and `sshPublicKey`. All other attributes **must** be fully populated for
each group and user being used by SSSD. The `gidNumber` of the user schema represents the user's primary group. To add
more groups to a user, add the user to groups with a `gidNumber` set.
## Client setup
@@ -45,25 +60,113 @@ You need to install the packages `sssd` `sssd-tools` `libnss-sss` `libpam-sss` `
E.g. on Debian/Ubuntu
```bash
sudo apt update; sudo apt install -y sssd sssd-tools libnss-sss libpam-sss libsss-sudo
sudo apt install -y sssd sssd-tools libnss-sss libpam-sss libsss-sudo
```
### Configure the client packages
Use your favourite text editor to create/open the file `/etc/sssd/sssd.conf` .
This example makes the following assumptions which need to be adjusted:
E.g. Using nano
* Domain: `example.com`
* Domain Component: `dc=example,dc=com`
* LDAP URL: `ldaps://ldap.example.com/`
* Bind Username: `binduser`
* Bind Password: `bindpassword`
The global config filters **out** the root user and group. It also restricts the number of failed login attempts
with cached credentials if the server is unreachable.
Use your favourite text editor to create the SSSD global configuration:
```bash
sudo nano /etc/sssd/sssd.conf
```
Insert the contents of the provided template (sssd.conf), but you will need to change some of the configuration in the file. Comments have been made to guide you. The config file is an example if your LLDAP server is hosted at `lldap.example.com` and your domain is `example.com` with your dc being `dc=example,dc=com`.
```
[sssd]
config_file_version = 2
services = nss, pam, ssh
domains = example.com
SSSD will **refuse** to run if its config file is world-readable, so apply the following permissions to it:
[nss]
filter_users = root
filter_groups = root
[pam]
offline_failed_login_attempts = 3
offline_failed_login_delay = 5
[ssh]
```
The following domain configuration is set up for the LLDAP `RFC2307bis` schema and the custom attributes created at the
beginning of the guide. It allows all configured LDAP users to log in by default while filtering out users and groups
which don't have their posix IDs set.
Because caching is enabled make sure to check the [Debugging](#Debugging) section on how to
flush the cache if you are having problems.
Create a separate configuration file for your domain.
```bash
sudo nano /etc/sssd/conf.d/example.com.conf
```
```
[domain/example.com]
id_provider = ldap
auth_provider = ldap
chpass_provider = ldap
access_provider = permit
enumerate = True
cache_credentials = True
# ldap provider
ldap_uri = ldaps://ldap.example.com/
ldap_schema = rfc2307bis
ldap_search_base = dc=example,dc=com
ldap_default_bind_dn = uid=binduser,ou=people,dc=example,dc=com
ldap_default_authtok = bindpassword
# For certificates signed by a registered CA
ldap_tls_cacert = /etc/ssl/certs/ca-certificates.crt
# For self signed certificates
# ldap_tls_cacert = cert.pem
ldap_tls_reqcert = demand
# users
ldap_user_search_base = ou=people,dc=example,dc=com?subtree?(uidNumber=*)
ldap_user_object_class = posixAccount
ldap_user_name = uid
ldap_user_gecos = cn
ldap_user_uid_number = uidNumber
ldap_user_gid_number = gidNumber
ldap_user_home_directory = homeDirectory
ldap_user_shell = unixShell
ldap_user_ssh_public_key = sshPublicKey
# groups
ldap_group_search_base = ou=groups,dc=example,dc=com?subtree?(gidNumber=*)
ldap_group_object_class = groupOfUniqueNames
ldap_group_name = cn
ldap_group_gid_number = gidNumber
ldap_group_member = uniqueMember
```
SSSD will **refuse** to run if its config files have the wrong permissions, so apply the following permissions to the
files:
```bash
sudo chmod 600 /etc/sssd/sssd.conf
sudo chmod 600 /etc/sssd/conf.d/example.com.conf
```
Enable automatic creation of home directories:
```bash
sudo pam-auth-update --enable mkhomedir
```
Restart SSSD to apply any changes:
@@ -72,26 +175,11 @@ Restart SSSD to apply any changes:
sudo systemctl restart sssd
```
Enable automatic creation of home directories
```bash
sudo pam-auth-update --enable mkhomedir
```
## Permissions and SSH Key sync
### SSH Key Sync
In order to do this, you need to setup the custom attribute `sshPublicKey` in the user schema. Then, you must uncomment the following line in the SSSD config file (assuming you are using the provided template):
```bash
sudo nano /etc/sssd/sssd.conf
```
```jsx
ldap_user_ssh_public_key = sshPublicKey
```
And the following to the bottom of your OpenSSH config file:
Add the following to the bottom of your OpenSSH config file:
```bash
sudo nano /etc/ssh/sshd_config
@@ -111,11 +199,15 @@ sudo systemctl restart sssd
### Permissions Sync
Linux often manages permissions to tools such as Sudo and Docker based on group membership. There are two possible ways to achieve this.
Linux often manages permissions to tools such as Sudo and Docker based on group membership. There are two possible ways
to achieve this.
**Number 1**
**Option 1**
**If all your client systems are setup identically,** you can just check the group id of the local group, i.e. Sudo being 27 on most Debian and Ubuntu installs, and set that as the gid in LLDAP. For tools such as docker, you can create a group before install with a custom gid on the system, which must be the same on all, and use that GID on the LLDAP group
**If all your client systems are set up identically,** you can just check the group id of the local group, i.e. `sudo`
being 27 on most Debian and Ubuntu installs, and set that as the gid in LLDAP.
For tools such as docker, you can create a group before install with a custom gid on the system, which must be the same
on all, and use that GID on the LLDAP group
Sudo
@@ -123,15 +215,16 @@ Sudo
Docker
```jsx
```bash
sudo groupadd docker -g 722
```
![image](https://github.com/user-attachments/assets/face88d0-5a20-4442-a5e3-9f6a1ae41b68)
**Number 2**
**Option 2**
Create a group in LLDAP that you would like all your users who have sudo access to be in, and add the following to the bottom of `/etc/sudoers` .
Create a group in LLDAP that you would like all your users who have sudo access to be in, and add the following to the
bottom of `/etc/sudoers` .
E.g. if your group is named `lldap_sudo`
@@ -143,15 +236,21 @@ E.g. if your group is named `lldap_sudo`
To verify your config files validity, you can run the following command
```jsx
```bash
sudo sssctl config-check
```
To flush SSSD's cache
```jsx
```bash
sudo sss_cache -E
```
Man pages
```bash
man sssd
man sssd-ldap
```
## Final Notes
To see the old guide for NSLCD, go to NSLCD.md.
+27
View File
@@ -0,0 +1,27 @@
# LLDAP Configuration for Pocket-ID
[Pocket-ID](https://pocket-id.org/) is a simple, easy-to-use OIDC provider that lets users authenticate to your services using passkeys.
| | | Value |
|-----------------------|------------------------------------|-----------------------------------------------------------|
| **Client Configuration** | LDAP URL | ldaps://url:port |
| | LDAP Bind DN | uid=binduser,ou=people,dc=example,dc=com |
| | LDAP Bind Password | password for binduser |
| | LDAP Base DN | dc=example,dc=com |
| | User Search Filter | (objectClass=person) |
| | Groups Search Filter | (objectClass=groupOfNames) |
| | Skip Certificate Verification | true/false |
| | Keep disabled users from LDAP | false |
| **Attribute Mapping** | User Unique Identifier Attribute | uuid |
| | Username Attribute | uid |
| | User Mail Attribute | mail |
| | User First Name Attribute | givenName |
| | User Last Name Attribute | sn |
| | User Profile Picture Attribute | jpegPhoto |
| | Group Members Attribute | member |
| | Group Unique Identifier Attribute | uuid |
| | Group Name Attribute | cn |
| | Admin Group Name | pocketid_admin_group_name |
Save and Sync.
+6 -6
View File
@@ -31,13 +31,13 @@ Starting `lldap.service` will start all the other services, but stopping it will
- At this point, you should be able to start the container.
- Test this with:
```bash
$ podman --user daemon-reload
$ podman --user start lldap
$ podman --user status lldap
$ systemctl --user daemon-reload
$ systemctl --user start lldap
$ systemctl --user status lldap
```
- Assuming it launched correctly, you should now stop it again.
```bash
$ podman --user stop lldap
$ systemctl --user stop lldap
```
- Make any adjustments you feel are necessary to the network files.
- Now all that's left to do is the [bootstrapping process](../bootstrap/bootstrap.md#docker-compose):
@@ -45,8 +45,8 @@ Starting `lldap.service` will start all the other services, but stopping it will
- Toward the end of the container section, uncomment the lines in `lldap.container` regarding the bootstrap process.
- Start the container:
```bash
$ podman --user daemon-reload
$ podman --user start lldap
$ systemctl --user daemon-reload
$ systemctl --user start lldap
```
- Attach a terminal to the container, and run `bootstrap.sh`:
```bash
+8 -2
View File
@@ -56,9 +56,15 @@ ou=groups,dc=example,dc=com
```
#### Group Membership Attribute
```
cn
uniqueMember
```
#### Group Filter
Is optional:
```
is optional
(objectClass=groupofuniquenames)
```
## Admin group search configurations
Use the same configurations as above to grant each user admin rights in their respective teams.
You can then also fetch all groups, and select which groups have universal admin rights.
+37
View File
@@ -0,0 +1,37 @@
# Configuration for Semaphore
Semaphore configuration is in `config.json`
Just add the following lines:
```json
"ldap_enable": true,
"ldap_needtls": true,
"ldap_server": "ldaps_server:6360",
"ldap_binddn": "uid=semaphorebind,ou=people,dc=example,dc=com",
"ldap_bindpassword": "verysecretpassword",
"ldap_searchdn": "ou=people,dc=example,dc=com",
"ldap_searchfilter": "(|(uid=%[1]s)(mail=%[1]s))",
"ldap_mappings": {
"dn": "dn",
"mail": "mail",
"uid": "uid",
"cn": "cn"
}
```
If you use environment variables:
```bash
Environment=SEMAPHORE_LDAP_ENABLE=true
Environment=SEMAPHORE_LDAP_SERVER="ldaps_server:6360"
Environment=SEMAPHORE_LDAP_NEEDTLS=true
Environment=SEMAPHORE_LDAP_BIND_DN="uid=semaphorebind,ou=people,dc=example,dc=com"
Environment=SEMAPHORE_LDAP_BIND_PASSWORD="verysecretpassword"
Environment=SEMAPHORE_LDAP_SEARCH_DN="ou=people,dc=example,dc=com"
Environment=SEMAPHORE_LDAP_SEARCH_FILTER="(|(uid=%[1]s)(mail=%[1]s))"
Environment=SEMAPHORE_LDAP_MAPPING_UID="uid"
Environment=SEMAPHORE_LDAP_MAPPING_CN="cn"
Environment=SEMAPHORE_LDAP_MAPPING_MAIL="mail"
Environment=SEMAPHORE_LDAP_MAPPING_DN="dn"
```
You can log in with username or email.
+10
View File
@@ -48,3 +48,13 @@ To integrate with LLDAP,
allow-invalid-certs = true
enable = false
```
## Email alias
If you want to enable [email aliases](https://stalw.art/docs/mta/inbound/rcpt/#catch-all-addresses), you have to create a new *User-defined attribute* under *User schema* of type string. Currently, LLDAP doesn't support multi-value filters. If you want multiple aliases, you will have to create multiple attributes (`mailAlias1`, `mailAlias2`, ..., `mailAliasN`), where `N` is the maximum number of aliases an account will have.
You also need to change your ldap filter for emails.
```toml
[directory.ldap.filter]
# Add one clause per alias attribute you created (example: mailAlias1..mailAlias3)
email = "(&(objectclass=person)(|(mail=?)(mailAlias1=?)(mailAlias2=?)(mailAlias3=?)))"
```
+126
View File
@@ -0,0 +1,126 @@
# Installing and Configuring LLDAP on TrueNAS
This guide walks through installing **LLDAP** from the TrueNAS Apps catalog and performing a basic configuration suitable for sharing authentication between multiple applications that support LDAP authentication.
It is intended to accompany the example configuration files in this repository and assumes a basic familiarity with the TrueNAS web interface.
## Prerequisites
- TrueNAS SCALE with Apps enabled
- Administrative access to the TrueNAS UI
- A system with working networking and DNS
- Optional but recommended: HTTPS certificates managed by TrueNAS
## Step 1: Install LLDAP from the TrueNAS Apps Catalog
1. Log in to the **TrueNAS web interface**.
2. Navigate to **Apps → Discover Apps**.
3. Search for **LLDAP**.
4. Click **Install**.
You will be presented with the LLDAP application configuration form.
## Step 2: Application Configuration
Below are the key configuration sections and recommended settings based on the official catalog definition.
### Application Name
- Leave the default name or choose a descriptive one (e.g. `lldap`).
### Networking
- **Web Port**: Default application port is typically **30325**. There is no standard port for the LLDAP web UI; this value is configurable in TrueNAS.
- **LDAP Port**:
- Standard LDAP port: **389**
- Default port configured by the TrueNAS app: **30326**
- **LDAPS Port**:
- Standard LDAPS port: **636**
- Default port configured by the TrueNAS app: **30327**
It is recommended to adjust these ports to suit your environment. Using standard ports (389/636) can simplify client configuration, but non-standard ports may be preferred to avoid conflicts on the host system. Ensure the selected ports are not already in use.
If LDAPS is enabled, it is strongly recommended to **disable the LDAP port** to ensure all directory traffic is encrypted.
### Authentication / Admin Account
- **LLDAP Admin Username**: Set an admin username (e.g. `admin`).
- **LLDAP Admin Password**: Set a strong password. This account is used to access the LLDAP web UI.
> ⚠️ Save this password securely. You will need it to log in and manage users and groups.
### Base DN Configuration
These values define your LDAP directory structure:
- **Base DN**: Example: `dc=example,dc=com`
- **User DN**: Typically `ou=people,dc=example,dc=com`
- **Group DN**: Typically `ou=groups,dc=example,dc=com`
These values must be consistent with the configuration used by client applications.
## Step 3: Storage Configuration
LLDAP requires persistent storage for its database.
- Configure an **application dataset** or **host path** for LLDAP data.
- Ensure the dataset is backed up as part of your normal TrueNAS backup strategy.
## Step 4: (Optional) Enable HTTPS Using TrueNAS Certificates
If your TrueNAS system manages certificates:
1. In the app configuration, select **Use Existing Certificate**.
2. Choose a certificate issued by TrueNAS.
3. Ensure the web port is accessed via `https://`.
This avoids storing certificate files inside the container and improves overall security.
## Step 5: Deploy the App
1. Review all configuration values.
2. Click **Install**.
3. Wait for the application status to show **Running**.
## Step 6: Access the LLDAP Web UI
- Navigate to: `http(s)://<truenas-ip>:<web-port>`
- Log in using the admin credentials you configured earlier.
From here you can:
- Create users
- Create groups
- Assign users to groups
## Step 7: Using LLDAP with Other Applications
LLDAP can be used as a central identity provider for many popular applications available in the TrueNAS Apps catalog. Common examples include:
- **Jellyfin** (media server)
- **Nextcloud** (collaboration and file sharing)
- **Gitea** (self-hosted Git service)
- **Grafana** (monitoring and dashboards)
- **MinIO** (object storage)
Configuration examples for several of these applications are also available in the upstream LLDAP repository under `example_configs`.
When configuring a client application:
- **LDAP Host**: TrueNAS IP address or the LLDAP app service name
- **LDAP / LDAPS Port**: As configured during install (prefer LDAPS if enabled)
- **Bind DN**: A dedicated service (bind) account or admin DN
- **Bind Password**: Password for the bind account
- **Base DN**: Must match the LLDAP Base DN
Once configured, users can authenticate to multiple applications using a single set of credentials managed centrally by LLDAP.
## Notes and Tips
- Prefer creating a **dedicated bind user** for applications instead of using the admin account.
- Keep Base DN values consistent across all services.
- Back up the LLDAP dataset regularly.
## References
- [TrueNAS Apps Catalog](https://apps.truenas.com/catalog/lldap/)
- [TrueNAS SCALE Documentation](https://www.truenas.com/docs/scale/)
Generated
+98
View File
@@ -0,0 +1,98 @@
{
"nodes": {
"crane": {
"locked": {
"lastModified": 1757183466,
"narHash": "sha256-kTdCCMuRE+/HNHES5JYsbRHmgtr+l9mOtf5dpcMppVc=",
"owner": "ipetkov",
"repo": "crane",
"rev": "d599ae4847e7f87603e7082d73ca673aa93c916d",
"type": "github"
},
"original": {
"owner": "ipetkov",
"repo": "crane",
"type": "github"
}
},
"flake-utils": {
"inputs": {
"systems": "systems"
},
"locked": {
"lastModified": 1731533236,
"narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1757487488,
"narHash": "sha256-zwE/e7CuPJUWKdvvTCB7iunV4E/+G0lKfv4kk/5Izdg=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "ab0f3607a6c7486ea22229b92ed2d355f1482ee0",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"root": {
"inputs": {
"crane": "crane",
"flake-utils": "flake-utils",
"nixpkgs": "nixpkgs",
"rust-overlay": "rust-overlay"
}
},
"rust-overlay": {
"inputs": {
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1757730403,
"narHash": "sha256-Jxl4OZRVsXs8JxEHUVQn3oPu6zcqFyGGKaFrlNgbzp0=",
"owner": "oxalica",
"repo": "rust-overlay",
"rev": "3232f7f8bd07849fc6f4ae77fe695e0abb2eba2c",
"type": "github"
},
"original": {
"owner": "oxalica",
"repo": "rust-overlay",
"type": "github"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
}
},
"root": "root",
"version": 7
}
+162
View File
@@ -0,0 +1,162 @@
{
  description = "LLDAP - Light LDAP implementation for authentication";

  inputs = {
    nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
    flake-utils.url = "github:numtide/flake-utils";
    rust-overlay = {
      url = "github:oxalica/rust-overlay";
      inputs.nixpkgs.follows = "nixpkgs";
    };
    crane = {
      url = "github:ipetkov/crane";
    };
  };

  outputs = { self, nixpkgs, flake-utils, rust-overlay, crane }:
    flake-utils.lib.eachDefaultSystem (system:
      let
        overlays = [ (import rust-overlay) ];
        pkgs = import nixpkgs {
          inherit system overlays;
        };

        # MSRV from the project
        rustVersion = "1.89.0";

        # Rust toolchain with required components and cross-compilation targets
        rustToolchain = pkgs.rust-bin.stable.${rustVersion}.default.override {
          extensions = [ "rust-src" "clippy" "rustfmt" ];
          targets = [
            "wasm32-unknown-unknown"
            "x86_64-unknown-linux-musl"
            "aarch64-unknown-linux-musl"
            "armv7-unknown-linux-musleabihf"
          ];
        };

        # FIX: `crane.lib.${system}` was removed from crane (deprecated in
        # v0.16, removed in v0.17); with the locked 2025 crane revision the
        # flake fails to evaluate. The supported entry point is
        # `crane.mkLib pkgs`.
        craneLib = (crane.mkLib pkgs).overrideToolchain rustToolchain;

        # Common build inputs
        nativeBuildInputs = with pkgs; [
          # Rust toolchain and tools
          rustToolchain
          wasm-pack
          # Build tools
          pkg-config
          # Compression and utilities
          gzip
          curl
          wget
          # Development tools
          git
          jq
          # Cross-compilation support
          gcc
        ];

        buildInputs = with pkgs; [
          # System libraries that might be needed
          openssl
          sqlite
        ] ++ lib.optionals stdenv.isDarwin [
          # macOS specific dependencies
          darwin.apple_sdk.frameworks.Security
          darwin.apple_sdk.frameworks.SystemConfiguration
        ];

        # Environment variables
        commonEnvVars = {
          CARGO_TERM_COLOR = "always";
          RUST_BACKTRACE = "1";
          # Cross-compilation environment
          CARGO_TARGET_X86_64_UNKNOWN_LINUX_MUSL_LINKER = "${pkgs.pkgsStatic.stdenv.cc}/bin/cc";
          CARGO_TARGET_AARCH64_UNKNOWN_LINUX_MUSL_LINKER = "${pkgs.pkgsCross.aarch64-multiplatform.stdenv.cc}/bin/aarch64-unknown-linux-gnu-gcc";
          CARGO_TARGET_ARMV7_UNKNOWN_LINUX_MUSLEABIHF_LINKER = "${pkgs.pkgsCross.armv7l-hf-multiplatform.stdenv.cc}/bin/arm-unknown-linux-gnueabihf-gcc";
        };
      in
      {
        # Development shells
        devShells = {
          default = pkgs.mkShell ({
            inherit nativeBuildInputs buildInputs;
            shellHook = ''
              echo "🔐 LLDAP Development Environment"
              echo "==============================================="
              echo "Rust version: ${rustVersion}"
              echo "Standard cargo commands available:"
              echo "  cargo build --workspace - Build the workspace"
              echo "  cargo test --workspace - Run tests"
              echo "  cargo clippy --tests --workspace -- -D warnings - Run linting"
              echo "  cargo fmt --check --all - Check formatting"
              echo "  ./app/build.sh - Build frontend WASM"
              echo "  ./export_schema.sh - Export GraphQL schema"
              echo "==============================================="
              echo ""

              # Ensure wasm-pack is available
              if ! command -v wasm-pack &> /dev/null; then
                echo " wasm-pack not found in PATH"
              fi

              # FIX: the original condition was inverted (`==`): it warned
              # "run this from the project root" exactly when the shell WAS
              # opened in the project root. Warn only when we are NOT there.
              if [[ "$(git rev-parse --show-toplevel 2>/dev/null)" != "$PWD" ]]; then
                echo " Run this from the project root directory"
              fi
            '';
          } // commonEnvVars);

          # Minimal shell for CI-like environment
          ci = pkgs.mkShell ({
            inherit nativeBuildInputs buildInputs;
            shellHook = ''
              echo "🤖 LLDAP CI Environment"
              echo "Running with Rust ${rustVersion}"
            '';
          } // commonEnvVars);
        };

        # Package outputs (optional - for building with Nix)
        packages = {
          default = craneLib.buildPackage {
            # FIX: `craneLib.path` was removed along with `crane.lib`;
            # `cleanCargoSource` accepts a plain path directly.
            src = craneLib.cleanCargoSource ./.;
            inherit nativeBuildInputs buildInputs;
            # Build only the server by default
            cargoExtraArgs = "-p lldap";
            # Skip tests in the package build
            doCheck = false;
            meta = with pkgs.lib; {
              description = "Light LDAP implementation for authentication";
              homepage = "https://github.com/lldap/lldap";
              license = licenses.gpl3Only;
              maintainers = with maintainers; [ ];
              platforms = platforms.unix;
            };
          };
        };

        # Formatter for the flake itself
        formatter = pkgs.nixpkgs-fmt;

        # Apps for running via `nix run`
        apps = {
          default = flake-utils.lib.mkApp {
            drv = self.packages.${system}.default;
          };
        };
      });
}
+13
View File
@@ -159,3 +159,16 @@ key_seed = "RanD0m STR1ng"
#cert_file="/data/cert.pem"
## Certificate key file.
#key_file="/data/key.pem"
## Options to configure the healthcheck command.
## To set these options from environment variables, use the following format
## (example with http_host): LLDAP_HEALTHCHECK_OPTIONS__HTTP_HOST
[healthcheck_options]
## The host address that the healthcheck should verify for the HTTP server.
## If "http_host" is set to a specific IP address, this must be set to match if the built-in
## healthcheck command is used. Note: if this is an IPv6 address, it must be wrapped in [].
#http_host = "localhost"
## The host address that the healthcheck should verify for the LDAP server.
## If "ldap_host" is set to a specific IP address, this must be set to match if the built-in
## healthcheck command is used.
#ldap_host = "localhost"
+2 -1
View File
@@ -7,6 +7,7 @@ authors.workspace = true
homepage.workspace = true
license.workspace = true
repository.workspace = true
rust-version.workspace = true
[dependencies]
anyhow = "*"
@@ -33,7 +34,7 @@ features = ["json", "blocking", "rustls-tls"]
[dependencies.ldap3]
version = "*"
default-features = false
features = ["sync", "tls-rustls"]
features = ["sync", "tls-rustls-ring"]
[dependencies.serde]
workspace = true
+2
View File
@@ -0,0 +1,2 @@
[toolchain]
channel = "1.89.0"
+4 -2
View File
@@ -605,6 +605,7 @@ main() {
local group_schema_files=()
local file=''
shopt -s nullglob
[[ -d "$USER_CONFIGS_DIR" ]] && for file in "${USER_CONFIGS_DIR}"/*.json; do
user_config_files+=("$file")
done
@@ -617,6 +618,7 @@ main() {
[[ -d "$GROUP_SCHEMAS_DIR" ]] && for file in "${GROUP_SCHEMAS_DIR}"/*.json; do
group_schema_files+=("$file")
done
shopt -u nullglob
if ! check_configs_validity "${group_config_files[@]}" "${user_config_files[@]}" "${group_schema_files[@]}" "${user_schema_files[@]}"; then
exit 1
@@ -710,9 +712,9 @@ main() {
redundant_users="$(printf '%s' "$redundant_users" | jq --compact-output --arg id "$id" '. - [$id]')"
if [[ "$password_file" != 'null' ]] && [[ "$password_file" != '""' ]]; then
LLDAP_USER_PASSWORD="$(cat $password_file)" "$LLDAP_SET_PASSWORD_PATH" --base-url "$LLDAP_URL" --token "$TOKEN" --username "$id"
"$LLDAP_SET_PASSWORD_PATH" --base-url "$LLDAP_URL" --token "$TOKEN" --username "$id" --password "$(cat $password_file)"
elif [[ "$password" != 'null' ]] && [[ "$password" != '""' ]]; then
LLDAP_USER_PASSWORD="$password" "$LLDAP_SET_PASSWORD_PATH" --base-url "$LLDAP_URL" --token "$TOKEN" --username "$id"
"$LLDAP_SET_PASSWORD_PATH" --base-url "$LLDAP_URL" --token "$TOKEN" --username "$id" --password "$password"
fi
# Process custom attributes
+21 -12
View File
@@ -9,6 +9,7 @@ authors.workspace = true
homepage.workspace = true
license.workspace = true
repository.workspace = true
rust-version.workspace = true
[dependencies]
actix = "0.13"
@@ -17,7 +18,6 @@ actix-http = "3"
actix-rt = "2"
actix-server = "2"
actix-service = "2"
actix-web = "4.3"
actix-web-httpauth = "0.8"
anyhow = "*"
async-trait = "0.1"
@@ -34,12 +34,12 @@ jwt = "0.16"
ldap3_proto = "0.6.0"
log = "*"
rand_chacha = "0.3"
rustls-pemfile = "1"
rustls-pemfile = "2"
serde_json = "1"
sha2 = "0.10"
thiserror = "2"
time = "0.3"
tokio-rustls = "0.23"
tokio-rustls = "0.26"
tokio-stream = "*"
tokio-util = "0.7"
tracing = "*"
@@ -47,7 +47,20 @@ tracing-actix-web = "0.7"
tracing-attributes = "^0.1.21"
tracing-log = "*"
urlencoding = "2"
webpki-roots = "0.22.2"
webpki-roots = "0.26"
[dependencies.actix-web]
features = ["rustls-0_23"]
version = "4.12.1"
[dependencies.rustls]
default-features = false
features = ["ring", "logging", "std", "tls12"]
version = "0.23"
[dependencies.rustls-pki-types]
features = ["std"]
version = "1"
[dependencies.chrono]
features = ["serde"]
@@ -73,7 +86,7 @@ features = ["env-filter", "tracing-log"]
[dependencies.lettre]
features = ["builder", "serde", "smtp-transport", "tokio1-rustls-tls"]
default-features = false
version = "0.10.1"
version = "0.11.19"
[dependencies.lldap_access_control]
path = "../crates/access-control"
@@ -136,7 +149,7 @@ features = ["full"]
version = "1.25"
[dependencies.uuid]
features = ["v1", "v3"]
features = ["v1", "v3", "v4"]
version = "1"
[dependencies.tracing-forest]
@@ -144,7 +157,7 @@ features = ["smallvec", "chrono", "tokio"]
version = "^0.1.6"
[dependencies.actix-tls]
features = ["default", "rustls"]
features = ["default", "rustls-0_23"]
version = "3"
[dependencies.sea-orm]
@@ -162,10 +175,6 @@ version = "0.11"
default-features = false
features = ["rustls-tls-webpki-roots"]
[dependencies.rustls]
version = "0.20"
features = ["dangerous_configuration"]
[dependencies.url]
version = "2"
features = ["serde"]
@@ -184,7 +193,7 @@ version = "0.11"
[dev-dependencies.ldap3]
version = "*"
default-features = false
features = ["sync", "tls-rustls"]
features = ["sync", "tls-rustls-ring"]
[dev-dependencies.lldap_auth]
path = "../crates/auth"
+3
View File
@@ -35,6 +35,7 @@ use std::{
};
use time::ext::NumericalDuration;
use tracing::{debug, info, instrument, warn};
use uuid::Uuid;
type Token<S> = jwt::Token<jwt::Header, JWTClaims, S>;
type SignedToken = Token<jwt::token::Signed>;
@@ -56,6 +57,7 @@ async fn create_jwt<Handler: TcpBackendHandler>(
let claims = JWTClaims {
exp: Utc::now() + chrono::Duration::days(1),
iat: Utc::now(),
jti: Uuid::new_v4(),
user: user.to_string(),
groups: groups
.into_iter()
@@ -189,6 +191,7 @@ where
user.display_name
.as_deref()
.unwrap_or_else(|| user.user_id.as_str()),
user.user_id.as_str(),
user.email.as_str(),
&token,
&data.server_url,
+15
View File
@@ -174,6 +174,9 @@ pub struct RunOpts {
#[clap(flatten)]
pub ldaps_opts: LdapsOpts,
#[clap(flatten)]
pub healthcheck_opts: HealthcheckOpts,
}
#[derive(Debug, Parser, Clone)]
@@ -264,6 +267,18 @@ pub struct ExportGraphQLSchemaOpts {
pub output_file: Option<String>,
}
#[derive(Debug, Parser, Clone)]
#[clap(next_help_heading = Some("HEALTHCHECK"))]
pub struct HealthcheckOpts {
/// Change the HTTP Host to test the health of. Default: "localhost"
#[clap(long, env = "LLDAP_HEALTHCHECK_OPTIONS__HTTP_HOST")]
pub healthcheck_http_host: Option<String>,
/// Change the LDAP Host to test the health of. Default: "localhost"
#[clap(long, env = "LLDAP_HEALTHCHECK_OPTIONS__LDAP_HOST")]
pub healthcheck_ldap_host: Option<String>,
}
pub fn init() -> CLIOpts {
CLIOpts::parse()
}
+146 -103
View File
@@ -1,13 +1,13 @@
use crate::{
cli::{
GeneralConfigOpts, LdapsOpts, RunOpts, SmtpEncryption, SmtpOpts, TestEmailOpts,
TrueFalseAlways,
GeneralConfigOpts, HealthcheckOpts, LdapsOpts, RunOpts, SmtpEncryption, SmtpOpts,
TestEmailOpts, TrueFalseAlways,
},
database_string::DatabaseUrl,
};
use anyhow::{Context, Result, bail};
use anyhow::{Context, Result, anyhow, bail};
use figment::{
Figment,
Figment, Provider,
providers::{Env, Format, Serialized, Toml},
};
use figment_file_provider_adapter::FileAdapter;
@@ -83,6 +83,21 @@ impl std::default::Default for LdapsOptions {
}
}
#[derive(Clone, Debug, Deserialize, Serialize, derive_builder::Builder)]
#[builder(pattern = "owned")]
pub struct HealthcheckOptions {
#[builder(default = r#"String::from("localhost")"#)]
pub http_host: String,
#[builder(default = r#"String::from("localhost")"#)]
pub ldap_host: String,
}
impl std::default::Default for HealthcheckOptions {
fn default() -> Self {
HealthcheckOptionsBuilder::default().build().unwrap()
}
}
#[derive(Clone, Deserialize, Serialize, derive_more::Debug)]
#[debug(r#""{_0}""#)]
pub struct HttpUrl(pub Url);
@@ -138,6 +153,8 @@ pub struct Configuration {
#[serde(skip)]
#[builder(field(private), default = "None")]
server_setup: Option<ServerSetupConfig>,
#[builder(default)]
pub healthcheck_options: HealthcheckOptions,
}
impl std::default::Default for Configuration {
@@ -416,37 +433,36 @@ impl ConfigOverrider for RunOpts {
fn override_config(&self, config: &mut Configuration) {
self.general_config.override_config(config);
if let Some(path) = self.server_key_file.as_ref() {
config.key_file = path.to_string();
}
self.server_key_file
.as_ref()
.inspect(|path| config.key_file = path.to_string());
if let Some(seed) = self.server_key_seed.as_ref() {
config.key_seed = Some(SecUtf8::from(seed));
}
self.server_key_seed
.as_ref()
.inspect(|seed| config.key_seed = Some(SecUtf8::from(seed.as_str())));
if let Some(port) = self.ldap_port {
config.ldap_port = port;
}
self.ldap_port.inspect(|&port| config.ldap_port = port);
if let Some(port) = self.http_port {
config.http_port = port;
}
self.http_port.inspect(|&port| config.http_port = port);
if let Some(url) = self.http_url.as_ref() {
config.http_url = HttpUrl(url.clone());
}
self.http_url
.as_ref()
.inspect(|&url| config.http_url = HttpUrl(url.clone()));
if let Some(database_url) = self.database_url.as_ref() {
config.database_url = database_url.clone();
}
self.database_url
.as_ref()
.inspect(|&database_url| config.database_url = database_url.clone());
if let Some(force_ldap_user_pass_reset) = self.force_ldap_user_pass_reset {
config.force_ldap_user_pass_reset = force_ldap_user_pass_reset;
}
self.force_ldap_user_pass_reset
.inspect(|&force_ldap_user_pass_reset| {
config.force_ldap_user_pass_reset = force_ldap_user_pass_reset;
});
self.force_update_private_key
.inspect(|&force_update_private_key| {
config.force_update_private_key = force_update_private_key;
});
if let Some(force_update_private_key) = self.force_update_private_key {
config.force_update_private_key = force_update_private_key;
}
self.smtp_opts.override_config(config);
self.ldaps_opts.override_config(config);
}
@@ -461,18 +477,19 @@ impl ConfigOverrider for TestEmailOpts {
impl ConfigOverrider for LdapsOpts {
fn override_config(&self, config: &mut Configuration) {
if let Some(enabled) = self.ldaps_enabled {
config.ldaps_options.enabled = enabled;
}
if let Some(port) = self.ldaps_port {
config.ldaps_options.port = port;
}
if let Some(path) = self.ldaps_cert_file.as_ref() {
config.ldaps_options.cert_file.clone_from(path);
}
if let Some(path) = self.ldaps_key_file.as_ref() {
config.ldaps_options.key_file.clone_from(path);
}
self.ldaps_enabled
.inspect(|&enabled| config.ldaps_options.enabled = enabled);
self.ldaps_port
.inspect(|&port| config.ldaps_options.port = port);
self.ldaps_cert_file
.as_ref()
.inspect(|path| config.ldaps_options.cert_file.clone_from(path));
self.ldaps_key_file
.as_ref()
.inspect(|path| config.ldaps_options.key_file.clone_from(path));
}
}
@@ -486,33 +503,52 @@ impl ConfigOverrider for GeneralConfigOpts {
impl ConfigOverrider for SmtpOpts {
fn override_config(&self, config: &mut Configuration) {
if let Some(from) = &self.smtp_from {
config.smtp_options.from = Some(Mailbox(from.clone()));
}
if let Some(reply_to) = &self.smtp_reply_to {
config.smtp_options.reply_to = Some(Mailbox(reply_to.clone()));
}
if let Some(server) = &self.smtp_server {
config.smtp_options.server.clone_from(server);
}
if let Some(port) = self.smtp_port {
config.smtp_options.port = port;
}
if let Some(user) = &self.smtp_user {
config.smtp_options.user.clone_from(user);
}
if let Some(password) = &self.smtp_password {
config.smtp_options.password = SecUtf8::from(password.clone());
}
if let Some(smtp_encryption) = &self.smtp_encryption {
self.smtp_from
.as_ref()
.inspect(|&from| config.smtp_options.from = Some(Mailbox(from.clone())));
self.smtp_reply_to
.as_ref()
.inspect(|&reply_to| config.smtp_options.reply_to = Some(Mailbox(reply_to.clone())));
self.smtp_server
.as_ref()
.inspect(|server| config.smtp_options.server.clone_from(server));
self.smtp_port
.inspect(|&port| config.smtp_options.port = port);
self.smtp_user
.as_ref()
.inspect(|user| config.smtp_options.user.clone_from(user));
self.smtp_password
.as_ref()
.inspect(|&password| config.smtp_options.password = SecUtf8::from(password.clone()));
self.smtp_encryption.as_ref().inspect(|&smtp_encryption| {
config.smtp_options.smtp_encryption = smtp_encryption.clone();
}
if let Some(tls_required) = self.smtp_tls_required {
config.smtp_options.tls_required = Some(tls_required);
}
if let Some(enable_password_reset) = self.smtp_enable_password_reset {
config.smtp_options.enable_password_reset = enable_password_reset;
}
});
self.smtp_tls_required
.inspect(|&tls_required| config.smtp_options.tls_required = Some(tls_required));
self.smtp_enable_password_reset
.inspect(|&enable_password_reset| {
config.smtp_options.enable_password_reset = enable_password_reset;
});
}
}
impl ConfigOverrider for HealthcheckOpts {
fn override_config(&self, config: &mut Configuration) {
self.healthcheck_http_host
.as_ref()
.inspect(|host| config.healthcheck_options.http_host.clone_from(host));
self.healthcheck_ldap_host
.as_ref()
.inspect(|host| config.healthcheck_options.ldap_host.clone_from(host));
}
}
@@ -556,6 +592,45 @@ fn expected_keys(dict: &figment::value::Dict) -> HashSet<String> {
keys
}
fn check_for_unexpected_env_variables<P: Provider>(env_variable_provider: P) {
use figment::Profile;
let expected_keys = expected_keys(
&Figment::from(Serialized::defaults(
ConfigurationBuilder::default().private_build().unwrap(),
))
.data()
.unwrap()[&Profile::default()],
);
extract_keys(&env_variable_provider.data().unwrap()[&Profile::default()])
.iter()
.filter(|k| !expected_keys.contains(k.as_str()))
.for_each(|k| {
eprintln!("WARNING: Unknown environment variable: {k}");
});
}
fn generate_jwt_sample_error() -> String {
use rand::{Rng, seq::SliceRandom};
struct Symbols;
impl rand::distributions::Distribution<char> for Symbols {
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> char {
*b"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz+,-./:;<=>?_~!@#$%^&*()[]{}:;".choose(rng).unwrap() as char
}
}
format!(
"The JWT secret must be initialized to a random string, preferably at least 32 characters long. \
Either set the `jwt_secret` config value or the `LLDAP_JWT_SECRET` environment variable. \
You can generate the value by running\n\
LC_ALL=C tr -dc 'A-Za-z0-9!#%&'\\''()*+,-./:;<=>?@[\\]^_{{|}}~' </dev/urandom | head -c 32; echo ''\n\
or you can use this random value: {}",
rand::thread_rng()
.sample_iter(&Symbols)
.take(32)
.collect::<String>()
)
}
pub fn init<C>(overrides: C) -> Result<Configuration>
where
C: TopLevelCommandOpts + ConfigOverrider,
@@ -581,22 +656,7 @@ where
if config.verbose {
println!("Configuration: {:#?}", &config);
}
{
use figment::{Profile, Provider};
let expected_keys = expected_keys(
&Figment::from(Serialized::defaults(
ConfigurationBuilder::default().private_build().unwrap(),
))
.data()
.unwrap()[&Profile::default()],
);
extract_keys(&env_variable_provider().data().unwrap()[&Profile::default()])
.iter()
.filter(|k| !expected_keys.contains(k.as_str()))
.for_each(|k| {
eprintln!("WARNING: Unknown environment variable: LLDAP_{k}");
});
}
check_for_unexpected_env_variables(env_variable_provider());
config.server_setup = Some(get_server_setup(
&config.key_file,
config
@@ -606,27 +666,10 @@ where
.unwrap_or_default(),
figment_config,
)?);
if config.jwt_secret.is_none() {
use rand::{Rng, seq::SliceRandom};
struct Symbols;
impl rand::prelude::Distribution<char> for Symbols {
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> char {
*b"01234567890ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz+,-./:;<=>?_~!@#$%^&*()[]{}:;".choose(rng).unwrap() as char
}
}
bail!(
"The JWT secret must be initialized to a random string, preferably at least 32 characters long. \
Either set the `jwt_secret` config value or the `LLDAP_JWT_SECRET` environment variable. \
You can generate the value by running\n\
LC_ALL=C tr -dc 'A-Za-z0-9!#%&'\\''()*+,-./:;<=>?@[\\]^_{{|}}~' </dev/urandom | head -c 32; echo ''\n\
or you can use this random value: {}",
rand::thread_rng()
.sample_iter(&Symbols)
.take(32)
.collect::<String>()
);
}
config
.jwt_secret
.as_ref()
.ok_or_else(|| anyhow!("{}", generate_jwt_sample_error()))?;
if config.smtp_options.tls_required.is_some() {
println!(
"DEPRECATED: smtp_options.tls_required field is deprecated, it never did anything. You can replace it with smtp_options.smtp_encryption."
+86 -51
View File
@@ -1,4 +1,4 @@
use crate::{configuration::LdapsOptions, ldap_server::read_certificates};
use crate::{configuration::LdapsOptions, tls};
use anyhow::{Context, Result, anyhow, bail, ensure};
use futures_util::SinkExt;
use ldap3_proto::{
@@ -8,6 +8,11 @@ use ldap3_proto::{
LdapSearchScope,
},
};
use rustls::client::danger::{HandshakeSignatureValid, ServerCertVerified, ServerCertVerifier};
use rustls::crypto::{verify_tls12_signature, verify_tls13_signature};
use rustls::pki_types::{CertificateDer, ServerName, UnixTime};
use rustls::{DigitallySignedStruct, SignatureScheme};
use std::sync::Arc;
use tokio::net::TcpStream;
use tokio_rustls::TlsConnector as RustlsTlsConnector;
use tokio_util::codec::{FramedRead, FramedWrite};
@@ -70,77 +75,107 @@ where
}
#[instrument(level = "info", err)]
pub async fn check_ldap(port: u16) -> Result<()> {
check_ldap_endpoint(TcpStream::connect(format!("localhost:{port}")).await?).await
}
fn get_root_certificates() -> rustls::RootCertStore {
let mut root_store = rustls::RootCertStore::empty();
root_store.add_server_trust_anchors(webpki_roots::TLS_SERVER_ROOTS.0.iter().map(|ta| {
rustls::OwnedTrustAnchor::from_subject_spki_name_constraints(
ta.subject,
ta.spki,
ta.name_constraints,
)
}));
root_store
pub async fn check_ldap(host: &str, port: u16) -> Result<()> {
check_ldap_endpoint(TcpStream::connect((host, port)).await?).await
}
fn get_tls_connector(ldaps_options: &LdapsOptions) -> Result<RustlsTlsConnector> {
let mut client_config = rustls::ClientConfig::builder()
.with_safe_defaults()
.with_root_certificates(get_root_certificates())
.with_no_client_auth();
let (certs, _private_key) = read_certificates(ldaps_options)?;
// Check that the server cert is the one in the config file.
let certs = tls::load_certificates(&ldaps_options.cert_file)?;
let target_cert = certs.first().expect("empty certificate chain").clone();
#[derive(Debug)]
struct CertificateVerifier {
certificate: rustls::Certificate,
certificate_path: String,
certificate: CertificateDer<'static>,
}
impl rustls::client::ServerCertVerifier for CertificateVerifier {
impl ServerCertVerifier for CertificateVerifier {
fn verify_server_cert(
&self,
end_entity: &rustls::Certificate,
_intermediates: &[rustls::Certificate],
_server_name: &rustls::ServerName,
_scts: &mut dyn Iterator<Item = &[u8]>,
end_entity: &CertificateDer<'_>,
_intermediates: &[CertificateDer<'_>],
_server_name: &ServerName<'_>,
_ocsp_response: &[u8],
_now: std::time::SystemTime,
) -> std::result::Result<rustls::client::ServerCertVerified, rustls::Error> {
_now: UnixTime,
) -> Result<ServerCertVerified, rustls::Error> {
if end_entity != &self.certificate {
return Err(rustls::Error::InvalidCertificateData(format!(
"Server certificate doesn't match the one in the config file {}",
&self.certificate_path
)));
return Err(rustls::Error::InvalidCertificate(
rustls::CertificateError::NotValidForName,
));
}
Ok(rustls::client::ServerCertVerified::assertion())
Ok(ServerCertVerified::assertion())
}
fn verify_tls12_signature(
&self,
message: &[u8],
cert: &CertificateDer<'_>,
dss: &DigitallySignedStruct,
) -> Result<HandshakeSignatureValid, rustls::Error> {
verify_tls12_signature(
message,
cert,
dss,
&rustls::crypto::ring::default_provider().signature_verification_algorithms,
)
}
fn verify_tls13_signature(
&self,
message: &[u8],
cert: &CertificateDer<'_>,
dss: &DigitallySignedStruct,
) -> Result<HandshakeSignatureValid, rustls::Error> {
verify_tls13_signature(
message,
cert,
dss,
&rustls::crypto::ring::default_provider().signature_verification_algorithms,
)
}
fn supported_verify_schemes(&self) -> Vec<SignatureScheme> {
rustls::crypto::ring::default_provider()
.signature_verification_algorithms
.supported_schemes()
}
}
let mut dangerous_config = rustls::client::DangerousClientConfig {
cfg: &mut client_config,
};
dangerous_config.set_certificate_verifier(std::sync::Arc::new(CertificateVerifier {
certificate: certs.first().expect("empty certificate chain").clone(),
certificate_path: ldaps_options.cert_file.clone(),
}));
Ok(std::sync::Arc::new(client_config).into())
let verifier = Arc::new(CertificateVerifier {
certificate: target_cert,
});
let client_config = rustls::ClientConfig::builder_with_provider(
rustls::crypto::ring::default_provider().into(),
)
.with_safe_default_protocol_versions()
.context("Failed to set default protocol versions")?
.dangerous()
.with_custom_certificate_verifier(verifier)
.with_no_client_auth();
Ok(Arc::new(client_config).into())
}
#[instrument(skip_all, level = "info", err, fields(port = %ldaps_options.port))]
pub async fn check_ldaps(ldaps_options: &LdapsOptions) -> Result<()> {
#[instrument(skip_all, level = "info", err, fields(host = %host, port = %ldaps_options.port))]
pub async fn check_ldaps(host: &str, ldaps_options: &LdapsOptions) -> Result<()> {
if !ldaps_options.enabled {
info!("LDAPS not enabled");
return Ok(());
};
let tls_connector =
get_tls_connector(ldaps_options).context("while preparing the tls connection")?;
let url = format!("localhost:{}", ldaps_options.port);
let domain = match host.parse::<std::net::IpAddr>() {
Ok(ip) => ServerName::IpAddress(ip.into()),
Err(_) => ServerName::try_from(host.to_string())
.map_err(|_| anyhow!("Invalid DNS name: {}", host))?,
};
check_ldap_endpoint(
tls_connector
.connect(
rustls::ServerName::try_from("localhost")
.context("while parsing the server name")?,
TcpStream::connect(&url)
domain,
TcpStream::connect((host, ldaps_options.port))
.await
.context("while connecting TCP")?,
)
@@ -151,8 +186,8 @@ pub async fn check_ldaps(ldaps_options: &LdapsOptions) -> Result<()> {
}
#[instrument(level = "info", err)]
pub async fn check_api(port: u16) -> Result<()> {
reqwest::get(format!("http://localhost:{port}/health"))
pub async fn check_api(host: &str, port: u16) -> Result<()> {
reqwest::get(format!("http://{host}:{port}/health"))
.await?
.error_for_status()?;
info!("Success");
+32 -81
View File
@@ -1,15 +1,14 @@
use crate::configuration::{Configuration, LdapsOptions};
use crate::tls;
use actix_rt::net::TcpStream;
use actix_server::ServerBuilder;
use actix_service::{ServiceFactoryExt, fn_service};
use anyhow::{Context, Result, anyhow};
use anyhow::{Context, Result};
use ldap3_proto::{LdapCodec, control::LdapControl, proto::LdapMsg, proto::LdapOp};
use lldap_access_control::AccessControlledBackendHandler;
use lldap_domain::types::AttributeName;
use lldap_domain_handlers::handler::{BackendHandler, LoginHandler};
use lldap_ldap::LdapHandler;
use lldap_ldap::{LdapHandler, LdapInfo};
use lldap_opaque_handler::OpaqueHandler;
use rustls::PrivateKey;
use tokio_rustls::TlsAcceptor as RustlsTlsAcceptor;
use tokio_util::codec::{FramedRead, FramedWrite};
use tracing::{debug, error, info, instrument};
@@ -71,9 +70,7 @@ where
async fn handle_ldap_stream<Stream, Backend>(
stream: Stream,
backend_handler: Backend,
ldap_base_dn: String,
ignored_user_attributes: Vec<AttributeName>,
ignored_group_attributes: Vec<AttributeName>,
ldap_info: &'static LdapInfo,
) -> Result<Stream>
where
Backend: BackendHandler + LoginHandler + OpaqueHandler + 'static,
@@ -88,9 +85,7 @@ where
let session_uuid = Uuid::new_v4();
let mut session = LdapHandler::new(
AccessControlledBackendHandler::new(backend_handler),
ldap_base_dn,
ignored_user_attributes,
ignored_group_attributes,
ldap_info,
session_uuid,
);
@@ -107,55 +102,18 @@ where
Ok(requests.into_inner().unsplit(resp.into_inner()))
}
fn read_private_key(key_file: &str) -> Result<PrivateKey> {
use rustls_pemfile::{ec_private_keys, pkcs8_private_keys, rsa_private_keys};
use std::{fs::File, io::BufReader};
pkcs8_private_keys(&mut BufReader::new(File::open(key_file)?))
.map_err(anyhow::Error::from)
.and_then(|keys| {
keys.into_iter()
.next()
.ok_or_else(|| anyhow!("No PKCS8 key"))
})
.or_else(|_| {
rsa_private_keys(&mut BufReader::new(File::open(key_file)?))
.map_err(anyhow::Error::from)
.and_then(|keys| {
keys.into_iter()
.next()
.ok_or_else(|| anyhow!("No PKCS1 key"))
})
})
.or_else(|_| {
ec_private_keys(&mut BufReader::new(File::open(key_file)?))
.map_err(anyhow::Error::from)
.and_then(|keys| keys.into_iter().next().ok_or_else(|| anyhow!("No EC key")))
})
.with_context(|| {
format!("Cannot read either PKCS1, PKCS8 or EC private key from {key_file}")
})
.map(rustls::PrivateKey)
}
pub fn read_certificates(
ldaps_options: &LdapsOptions,
) -> Result<(Vec<rustls::Certificate>, rustls::PrivateKey)> {
use std::{fs::File, io::BufReader};
let certs = rustls_pemfile::certs(&mut BufReader::new(File::open(&ldaps_options.cert_file)?))?
.into_iter()
.map(rustls::Certificate)
.collect::<Vec<_>>();
let private_key = read_private_key(&ldaps_options.key_file)?;
Ok((certs, private_key))
}
fn get_tls_acceptor(ldaps_options: &LdapsOptions) -> Result<RustlsTlsAcceptor> {
let (certs, private_key) = read_certificates(ldaps_options)?;
let certs = tls::load_certificates(&ldaps_options.cert_file)?;
let private_key = tls::load_private_key(&ldaps_options.key_file)?;
let server_config = std::sync::Arc::new(
rustls::ServerConfig::builder()
.with_safe_defaults()
.with_no_client_auth()
.with_single_cert(certs, private_key)?,
rustls::ServerConfig::builder_with_provider(
rustls::crypto::ring::default_provider().into(),
)
.with_safe_default_protocol_versions()
.context("Failed to set default protocol versions")?
.with_no_client_auth()
.with_single_cert(certs, private_key)?,
);
Ok(server_config.into())
}
@@ -170,9 +128,19 @@ where
{
let context = (
backend_handler,
config.ldap_base_dn.clone(),
config.ignored_user_attributes.clone(),
config.ignored_group_attributes.clone(),
Box::leak(Box::new(
LdapInfo::new(
&config.ldap_base_dn,
config.ignored_user_attributes.clone(),
config.ignored_group_attributes.clone(),
)
.with_context(|| {
format!(
"Invalid value for ldap_base_dn in configuration: {}",
&config.ldap_base_dn
)
})?,
)) as &'static LdapInfo,
);
let context_for_tls = context.clone();
@@ -182,15 +150,8 @@ where
fn_service(move |stream: TcpStream| {
let context = context.clone();
async move {
let (handler, base_dn, ignored_user_attributes, ignored_group_attributes) = context;
handle_ldap_stream(
stream,
handler,
base_dn,
ignored_user_attributes,
ignored_group_attributes,
)
.await
let (handler, ldap_info) = context;
handle_ldap_stream(stream, handler, ldap_info).await
}
})
.map_err(|err: anyhow::Error| error!("[LDAP] Service Error: {:#}", err))
@@ -211,19 +172,9 @@ where
fn_service(move |stream: TcpStream| {
let tls_context = tls_context.clone();
async move {
let (
(handler, base_dn, ignored_user_attributes, ignored_group_attributes),
tls_acceptor,
) = tls_context;
let ((handler, ldap_info), tls_acceptor) = tls_context;
let tls_stream = tls_acceptor.accept(stream).await?;
handle_ldap_stream(
tls_stream,
handler,
base_dn,
ignored_user_attributes,
ignored_group_attributes,
)
.await
handle_ldap_stream(tls_stream, handler, ldap_info).await
}
})
.map_err(|err: anyhow::Error| error!("[LDAPS] Service Error: {:#}", err))
+5 -1
View File
@@ -80,6 +80,7 @@ async fn send_email(
}
pub async fn send_password_reset_email(
display_name: &str,
username: &str,
to: &str,
token: &str,
@@ -93,7 +94,10 @@ pub async fn send_password_reset_email(
.unwrap()
.extend(["reset-password", "step2", token]);
let body = format!(
"Hello {username},
"Hello {display_name},
Your username is: \"{username}\"
This email has been sent to you in order to validate your identity.
If you did not initiate the process your credentials might have been
compromised. You should reset your password and contact an administrator.
+27 -9
View File
@@ -17,6 +17,7 @@ mod mail;
mod sql_tcp_backend_handler;
mod tcp_backend_handler;
mod tcp_server;
mod tls;
use crate::{
cli::{Command, RunOpts, TestEmailOpts},
@@ -125,7 +126,7 @@ async fn setup_sql_tables(database_url: &DatabaseUrl) -> Result<DatabaseConnecti
}
#[instrument(skip_all)]
async fn set_up_server(config: Configuration) -> Result<ServerBuilder> {
async fn set_up_server(config: Configuration) -> Result<(ServerBuilder, DatabaseConnection)> {
info!("Starting LLDAP version {}", env!("CARGO_PKG_VERSION"));
let sql_pool = setup_sql_tables(&config.database_url).await?;
@@ -214,9 +215,9 @@ async fn set_up_server(config: Configuration) -> Result<ServerBuilder> {
.await
.context("while binding the TCP server")?;
// Run every hour.
let scheduler = Scheduler::new("0 0 * * * * *", sql_pool);
let scheduler = Scheduler::new("0 0 * * * * *", sql_pool.clone());
scheduler.start();
Ok(server_builder)
Ok((server_builder, sql_pool))
}
async fn run_server_command(opts: RunOpts) -> Result<()> {
@@ -225,9 +226,14 @@ async fn run_server_command(opts: RunOpts) -> Result<()> {
let config = configuration::init(opts)?;
logging::init(&config)?;
let server = set_up_server(config).await?.workers(1);
let (server, sql_pool) = set_up_server(config).await?;
let server = server.workers(1);
server.run().await.context("while starting the server")
let result = server.run().await.context("while starting the server");
if let Err(e) = sql_pool.close().await {
error!("Error closing database connection pool: {}", e);
}
result
}
async fn send_test_email_command(opts: TestEmailOpts) -> Result<()> {
@@ -250,9 +256,18 @@ async fn run_healthcheck(opts: RunOpts) -> Result<()> {
use tokio::time::timeout;
let delay = Duration::from_millis(3000);
let (ldap, ldaps, api) = tokio::join!(
timeout(delay, healthcheck::check_ldap(config.ldap_port)),
timeout(delay, healthcheck::check_ldaps(&config.ldaps_options)),
timeout(delay, healthcheck::check_api(config.http_port)),
timeout(
delay,
healthcheck::check_ldap(&config.healthcheck_options.ldap_host, config.ldap_port)
),
timeout(
delay,
healthcheck::check_ldaps(&config.healthcheck_options.ldap_host, &config.ldaps_options)
),
timeout(
delay,
healthcheck::check_api(&config.healthcheck_options.http_host, config.http_port)
),
);
let failure = [ldap, ldaps, api]
@@ -275,8 +290,11 @@ async fn create_schema_command(opts: RunOpts) -> Result<()> {
debug!("CLI: {:#?}", &opts);
let config = configuration::init(opts)?;
logging::init(&config)?;
setup_sql_tables(&config.database_url).await?;
let sql_pool = setup_sql_tables(&config.database_url).await?;
info!("Schema created successfully.");
if let Err(e) = sql_pool.close().await {
error!("Error closing database connection pool: {}", e);
}
Ok(())
}
+1
View File
@@ -12,3 +12,4 @@ pub mod mail;
pub mod sql_tcp_backend_handler;
pub mod tcp_backend_handler;
pub mod tcp_server;
pub mod tls;
+20
View File
@@ -0,0 +1,20 @@
use anyhow::{Context, Result, anyhow};
use rustls::pki_types::{CertificateDer, PrivateKeyDer, pem::PemObject};
pub fn load_certificates(filename: &str) -> Result<Vec<CertificateDer<'static>>> {
let certs = CertificateDer::pem_file_iter(filename)
.with_context(|| format!("Unable to open or read certificate file: {}", filename))?
.collect::<Result<Vec<_>, _>>()
.with_context(|| format!("Error parsing certificates in {}", filename))?;
if certs.is_empty() {
return Err(anyhow!("No certificates found in {}", filename));
}
Ok(certs)
}
pub fn load_private_key(filename: &str) -> Result<PrivateKeyDer<'static>> {
PrivateKeyDer::from_pem_file(filename)
.with_context(|| format!("Unable to load private key from {}", filename))
}

Some files were not shown because too many files have changed in this diff Show More