diff --git a/.github/workflows/pr-quality.yml b/.github/workflows/pr-quality.yml
new file mode 100644
index 00000000..545f6d81
--- /dev/null
+++ b/.github/workflows/pr-quality.yml
@@ -0,0 +1,106 @@
+name: PR Quality
+
+permissions:
+ contents: read
+ issues: read
+ pull-requests: write
+
+on:
+ pull_request_target:
+ types: [opened, reopened]
+
+jobs:
+ pr-quality:
+ runs-on: ubuntu-latest
+ steps:
+      - uses: peakoss/anti-slop@v0 # TODO: pin to a full commit SHA — pull_request_target runs with pull-requests:write
+ with:
+ # General Settings
+ max-failures: 4
+
+ # PR Branch Checks
+ allowed-target-branches: "main"
+ blocked-target-branches: ""
+ allowed-source-branches: ""
+ blocked-source-branches: ""
+
+ # PR Quality Checks
+ max-negative-reactions: 0
+ require-maintainer-can-modify: true
+
+ # PR Title Checks
+ require-conventional-title: true
+
+ # PR Description Checks
+ require-description: true
+ max-description-length: 2500
+ max-emoji-count: 0
+ max-code-references: 0
+ require-linked-issue: false
+ blocked-terms: ""
+ blocked-issue-numbers: ""
+
+ # PR Template Checks
+ require-pr-template: true
+ strict-pr-template-sections: ""
+ optional-pr-template-sections: "Issues"
+ max-additional-pr-template-sections: 3
+
+ # Commit Message Checks
+ max-commit-message-length: 500
+ require-conventional-commits: false
+ require-commit-author-match: true
+ blocked-commit-authors: ""
+
+ # File Checks
+ allowed-file-extensions: ""
+ allowed-paths: ""
+ blocked-paths: |
+ SECURITY.md
+ LICENSE
+ require-final-newline: false
+ max-added-comments: 0
+
+ # User Checks
+ detect-spam-usernames: true
+ min-account-age: 30
+ max-daily-forks: 7
+ min-profile-completeness: 4
+
+ # Merge Checks
+ min-repo-merged-prs: 0
+ min-repo-merge-ratio: 0
+ min-global-merge-ratio: 30
+ global-merge-ratio-exclude-own: false
+
+ # Exemptions
+ exempt-draft-prs: false
+ exempt-bots: |
+ actions-user
+ dependabot[bot]
+ renovate[bot]
+ github-actions[bot]
+ exempt-users: ""
+ exempt-author-association: "OWNER,MEMBER,COLLABORATOR"
+ exempt-label: "quality/exempt"
+ exempt-pr-label: ""
+ exempt-all-milestones: false
+ exempt-all-pr-milestones: false
+ exempt-milestones: ""
+ exempt-pr-milestones: ""
+
+ # PR Success Actions
+ success-add-pr-labels: "quality/verified"
+
+ # PR Failure Actions
+ failure-remove-pr-labels: ""
+ failure-remove-all-pr-labels: true
+ failure-add-pr-labels: "quality/rejected"
+ failure-pr-message: |
+ This PR did not pass quality checks so it will be closed.
+ See the [workflow run](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}/attempts/${{ github.run_attempt }}) for details on which checks failed.
+
+ If you believe this is a mistake please let us know.
+
+ close-pr: true
+ lock-pr: false
diff --git a/.version b/.version
index 276cbf9e..197c4d5c 100644
--- a/.version
+++ b/.version
@@ -1 +1 @@
-2.3.0
+2.4.0
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 715ee6ce..b5537486 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,35 @@
+## v2.4.0
+
+### Bug Fixes
+
+- improve wildcard matching by using `go-urlpattern` ([#1332](https://github.com/pocket-id/pocket-id/pull/1332) by @stonith404)
+- federated client credentials not working if sub ≠ client_id ([#1342](https://github.com/pocket-id/pocket-id/pull/1342) by @ItalyPaleAle)
+- handle IPv6 addresses in callback URLs ([#1355](https://github.com/pocket-id/pocket-id/pull/1355) by @ItalyPaleAle)
+- wildcard callback URLs blocked by browser-native URL validation ([#1359](https://github.com/pocket-id/pocket-id/pull/1359) by @Copilot)
+- one-time-access-token route should get user ID from URL only ([#1358](https://github.com/pocket-id/pocket-id/pull/1358) by @ItalyPaleAle)
+- various fixes in background jobs ([#1362](https://github.com/pocket-id/pocket-id/pull/1362) by @ItalyPaleAle)
+- use URL keyboard type for callback URL inputs ([a675d07](https://github.com/pocket-id/pocket-id/commit/a675d075d1ab9b7ff8160f1cfc35bc0ea1f1980a) by @stonith404)
+
+### Features
+
+- allow first name and display name to be optional ([#1288](https://github.com/pocket-id/pocket-id/pull/1288) by @taoso)
+
+### Other
+
+- bump svelte from 5.53.2 to 5.53.5 in the npm_and_yarn group across 1 directory ([#1348](https://github.com/pocket-id/pocket-id/pull/1348) by @dependabot[bot])
+- bump @sveltejs/kit from 2.53.0 to 2.53.3 in the npm_and_yarn group across 1 directory ([#1349](https://github.com/pocket-id/pocket-id/pull/1349) by @dependabot[bot])
+- update AAGUIDs ([#1354](https://github.com/pocket-id/pocket-id/pull/1354) by @github-actions[bot])
+- add Português files ([01141b8](https://github.com/pocket-id/pocket-id/commit/01141b8c0f2e96a40fd876d3206e49a694fd12c4) by @kmendell)
+- add Latvian files ([e0fc4cc](https://github.com/pocket-id/pocket-id/commit/e0fc4cc01bd51e5a97e46aad78a493a668049220) by @kmendell)
+- fix wrong seed data ([e7bd66d](https://github.com/pocket-id/pocket-id/commit/e7bd66d1a77c89dde542b4385ba01dc0d432e434) by @stonith404)
+- fix wrong seed data in `database.json` ([f4eb8db](https://github.com/pocket-id/pocket-id/commit/f4eb8db50993edacd90e919b39a5c6d9dd4924c7) by @stonith404)
+
+### Performance Improvements
+
+- frontend performance optimizations ([#1344](https://github.com/pocket-id/pocket-id/pull/1344) by @ItalyPaleAle)
+
+**Full Changelog**: https://github.com/pocket-id/pocket-id/compare/v2.3.0...v2.4.0
+
## v2.3.0
### Bug Fixes
diff --git a/backend/internal/bootstrap/bootstrap.go b/backend/internal/bootstrap/bootstrap.go
index 2b033684..692e6b0b 100644
--- a/backend/internal/bootstrap/bootstrap.go
+++ b/backend/internal/bootstrap/bootstrap.go
@@ -2,6 +2,7 @@ package bootstrap
import (
"context"
+ "errors"
"fmt"
"log/slog"
"time"
@@ -11,6 +12,7 @@ import (
"github.com/pocket-id/pocket-id/backend/internal/common"
"github.com/pocket-id/pocket-id/backend/internal/job"
+ "github.com/pocket-id/pocket-id/backend/internal/service"
"github.com/pocket-id/pocket-id/backend/internal/storage"
"github.com/pocket-id/pocket-id/backend/internal/utils"
)
@@ -60,7 +62,9 @@ func Bootstrap(ctx context.Context) error {
}
waitUntil, err := svc.appLockService.Acquire(ctx, false)
- if err != nil {
+ if errors.Is(err, service.ErrLockUnavailable) {
+ return errors.New("it appears that there's already one instance of Pocket ID running; running multiple replicas of Pocket ID is currently not supported")
+ } else if err != nil {
return fmt.Errorf("failed to acquire application lock: %w", err)
}
diff --git a/backend/internal/cmds/import.go b/backend/internal/cmds/import.go
index 974d049e..0f51c492 100644
--- a/backend/internal/cmds/import.go
+++ b/backend/internal/cmds/import.go
@@ -119,11 +119,10 @@ func acquireImportLock(ctx context.Context, db *gorm.DB, force bool) error {
defer cancel()
waitUntil, err := appLockService.Acquire(opCtx, force)
- if err != nil {
- if errors.Is(err, service.ErrLockUnavailable) {
- //nolint:staticcheck
- return errors.New("Pocket ID must be stopped before importing data; please stop the running instance or run with --forcefully-acquire-lock to terminate the other instance")
- }
+ if errors.Is(err, service.ErrLockUnavailable) {
+ //nolint:staticcheck
+ return errors.New("Pocket ID must be stopped before importing data; please stop the running instance or run with --forcefully-acquire-lock to terminate the other instance")
+ } else if err != nil {
return fmt.Errorf("failed to acquire application lock: %w", err)
}
diff --git a/backend/internal/common/errors.go b/backend/internal/common/errors.go
index 9a0b41b0..eb5e57ed 100644
--- a/backend/internal/common/errors.go
+++ b/backend/internal/common/errors.go
@@ -139,6 +139,20 @@ func (e *TooManyRequestsError) Error() string {
}
func (e *TooManyRequestsError) HttpStatusCode() int { return http.StatusTooManyRequests }
+type UserIdNotProvidedError struct{}
+
+func (e *UserIdNotProvidedError) Error() string {
+ return "User id not provided"
+}
+func (e *UserIdNotProvidedError) HttpStatusCode() int { return http.StatusBadRequest }
+
+type UserNotFoundError struct{}
+
+func (e *UserNotFoundError) Error() string {
+ return "User not found"
+}
+func (e *UserNotFoundError) HttpStatusCode() int { return http.StatusNotFound }
+
type ClientIdOrSecretNotProvidedError struct{}
func (e *ClientIdOrSecretNotProvidedError) Error() string {
diff --git a/backend/internal/controller/user_controller.go b/backend/internal/controller/user_controller.go
index 78a5563c..b38eeabc 100644
--- a/backend/internal/controller/user_controller.go
+++ b/backend/internal/controller/user_controller.go
@@ -4,6 +4,7 @@ import (
"net/http"
"time"
+ "github.com/pocket-id/pocket-id/backend/internal/common"
"github.com/pocket-id/pocket-id/backend/internal/utils/cookie"
"github.com/gin-gonic/gin"
@@ -322,22 +323,34 @@ func (uc *UserController) updateCurrentUserProfilePictureHandler(c *gin.Context)
func (uc *UserController) createOneTimeAccessTokenHandler(c *gin.Context, own bool) {
var input dto.OneTimeAccessTokenCreateDto
- if err := c.ShouldBindJSON(&input); err != nil {
+ err := c.ShouldBindJSON(&input)
+ if err != nil {
_ = c.Error(err)
return
}
- var ttl time.Duration
+ var (
+ userID string
+ ttl time.Duration
+ )
if own {
- input.UserID = c.GetString("userID")
+ // Get user ID from context and force the default TTL
+ userID = c.GetString("userID")
ttl = defaultOneTimeAccessTokenDuration
} else {
+ // Get user ID from URL parameter, and optional TTL from body
+ userID = c.Param("id")
ttl = input.TTL.Duration
if ttl <= 0 {
ttl = defaultOneTimeAccessTokenDuration
}
}
- token, err := uc.oneTimeAccessService.CreateOneTimeAccessToken(c.Request.Context(), input.UserID, ttl)
+ if userID == "" {
+ _ = c.Error(&common.UserIdNotProvidedError{})
+ return
+ }
+
+ token, err := uc.oneTimeAccessService.CreateOneTimeAccessToken(c.Request.Context(), userID, ttl)
if err != nil {
_ = c.Error(err)
return
diff --git a/backend/internal/dto/one_time_access_dto.go b/backend/internal/dto/one_time_access_dto.go
index 336def70..a99dc5ac 100644
--- a/backend/internal/dto/one_time_access_dto.go
+++ b/backend/internal/dto/one_time_access_dto.go
@@ -3,8 +3,7 @@ package dto
import "github.com/pocket-id/pocket-id/backend/internal/utils"
type OneTimeAccessTokenCreateDto struct {
- UserID string `json:"userId"`
- TTL utils.JSONDuration `json:"ttl" binding:"ttl"`
+ TTL utils.JSONDuration `json:"ttl" binding:"ttl"`
}
type OneTimeAccessEmailAsUnauthenticatedUserDto struct {
diff --git a/backend/internal/job/analytics_job.go b/backend/internal/job/analytics_job.go
index f67e2042..c2e658df 100644
--- a/backend/internal/job/analytics_job.go
+++ b/backend/internal/job/analytics_job.go
@@ -28,7 +28,7 @@ func (s *Scheduler) RegisterAnalyticsJob(ctx context.Context, appConfig *service
appConfig: appConfig,
httpClient: httpClient,
}
- return s.RegisterJob(ctx, "SendHeartbeat", gocron.DurationJob(24*time.Hour), jobs.sendHeartbeat, true)
+ return s.RegisterJob(ctx, "SendHeartbeat", gocron.DurationJob(24*time.Hour), jobs.sendHeartbeat, service.RegisterJobOpts{RunImmediately: true})
}
type AnalyticsJob struct {
diff --git a/backend/internal/job/api_key_expiry_job.go b/backend/internal/job/api_key_expiry_job.go
index 6524089b..2481010e 100644
--- a/backend/internal/job/api_key_expiry_job.go
+++ b/backend/internal/job/api_key_expiry_job.go
@@ -22,7 +22,7 @@ func (s *Scheduler) RegisterApiKeyExpiryJob(ctx context.Context, apiKeyService *
}
// Send every day at midnight
- return s.RegisterJob(ctx, "ExpiredApiKeyEmailJob", gocron.CronJob("0 0 * * *", false), jobs.checkAndNotifyExpiringApiKeys, false)
+ return s.RegisterJob(ctx, "ExpiredApiKeyEmailJob", gocron.CronJob("0 0 * * *", false), jobs.checkAndNotifyExpiringApiKeys, service.RegisterJobOpts{})
}
func (j *ApiKeyEmailJobs) checkAndNotifyExpiringApiKeys(ctx context.Context) error {
@@ -42,7 +42,11 @@ func (j *ApiKeyEmailJobs) checkAndNotifyExpiringApiKeys(ctx context.Context) err
}
err = j.apiKeyService.SendApiKeyExpiringSoonEmail(ctx, key)
if err != nil {
- slog.ErrorContext(ctx, "Failed to send expiring API key notification email", slog.String("key", key.ID), slog.Any("error", err))
+ slog.ErrorContext(ctx, "Failed to send expiring API key notification email",
+ slog.String("key", key.ID),
+ slog.String("user", key.User.ID),
+ slog.Any("error", err),
+ )
}
}
return nil
diff --git a/backend/internal/job/db_cleanup_job.go b/backend/internal/job/db_cleanup_job.go
index fca96516..4872fae1 100644
--- a/backend/internal/job/db_cleanup_job.go
+++ b/backend/internal/job/db_cleanup_job.go
@@ -7,28 +7,37 @@ import (
"log/slog"
"time"
- "github.com/go-co-op/gocron/v2"
+ backoff "github.com/cenkalti/backoff/v5"
"gorm.io/gorm"
"github.com/pocket-id/pocket-id/backend/internal/common"
"github.com/pocket-id/pocket-id/backend/internal/model"
datatype "github.com/pocket-id/pocket-id/backend/internal/model/types"
+ "github.com/pocket-id/pocket-id/backend/internal/service"
)
func (s *Scheduler) RegisterDbCleanupJobs(ctx context.Context, db *gorm.DB) error {
jobs := &DbCleanupJobs{db: db}
- // Run every 24 hours (but with some jitter so they don't run at the exact same time), and now
- def := gocron.DurationRandomJob(24*time.Hour-2*time.Minute, 24*time.Hour+2*time.Minute)
+ newBackOff := func() *backoff.ExponentialBackOff {
+ bo := backoff.NewExponentialBackOff()
+ bo.Multiplier = 4
+ bo.RandomizationFactor = 0.1
+ bo.InitialInterval = time.Second
+ bo.MaxInterval = 45 * time.Second
+ return bo
+ }
+
+ // Use exponential backoff for each DB cleanup job so transient query failures are retried automatically rather than causing an immediate job failure
return errors.Join(
- s.RegisterJob(ctx, "ClearWebauthnSessions", def, jobs.clearWebauthnSessions, true),
- s.RegisterJob(ctx, "ClearOneTimeAccessTokens", def, jobs.clearOneTimeAccessTokens, true),
- s.RegisterJob(ctx, "ClearSignupTokens", def, jobs.clearSignupTokens, true),
- s.RegisterJob(ctx, "ClearEmailVerificationTokens", def, jobs.clearEmailVerificationTokens, true),
- s.RegisterJob(ctx, "ClearOidcAuthorizationCodes", def, jobs.clearOidcAuthorizationCodes, true),
- s.RegisterJob(ctx, "ClearOidcRefreshTokens", def, jobs.clearOidcRefreshTokens, true),
- s.RegisterJob(ctx, "ClearReauthenticationTokens", def, jobs.clearReauthenticationTokens, true),
- s.RegisterJob(ctx, "ClearAuditLogs", def, jobs.clearAuditLogs, true),
+ s.RegisterJob(ctx, "ClearWebauthnSessions", jobDefWithJitter(24*time.Hour), jobs.clearWebauthnSessions, service.RegisterJobOpts{RunImmediately: true, BackOff: newBackOff()}),
+ s.RegisterJob(ctx, "ClearOneTimeAccessTokens", jobDefWithJitter(24*time.Hour), jobs.clearOneTimeAccessTokens, service.RegisterJobOpts{RunImmediately: true, BackOff: newBackOff()}),
+ s.RegisterJob(ctx, "ClearSignupTokens", jobDefWithJitter(24*time.Hour), jobs.clearSignupTokens, service.RegisterJobOpts{RunImmediately: true, BackOff: newBackOff()}),
+ s.RegisterJob(ctx, "ClearEmailVerificationTokens", jobDefWithJitter(24*time.Hour), jobs.clearEmailVerificationTokens, service.RegisterJobOpts{RunImmediately: true, BackOff: newBackOff()}),
+ s.RegisterJob(ctx, "ClearOidcAuthorizationCodes", jobDefWithJitter(24*time.Hour), jobs.clearOidcAuthorizationCodes, service.RegisterJobOpts{RunImmediately: true, BackOff: newBackOff()}),
+ s.RegisterJob(ctx, "ClearOidcRefreshTokens", jobDefWithJitter(24*time.Hour), jobs.clearOidcRefreshTokens, service.RegisterJobOpts{RunImmediately: true, BackOff: newBackOff()}),
+ s.RegisterJob(ctx, "ClearReauthenticationTokens", jobDefWithJitter(24*time.Hour), jobs.clearReauthenticationTokens, service.RegisterJobOpts{RunImmediately: true, BackOff: newBackOff()}),
+ s.RegisterJob(ctx, "ClearAuditLogs", jobDefWithJitter(24*time.Hour), jobs.clearAuditLogs, service.RegisterJobOpts{RunImmediately: true, BackOff: newBackOff()}),
)
}
diff --git a/backend/internal/job/file_cleanup_job.go b/backend/internal/job/file_cleanup_job.go
index 2b141dac..71f0dd34 100644
--- a/backend/internal/job/file_cleanup_job.go
+++ b/backend/internal/job/file_cleanup_job.go
@@ -13,20 +13,26 @@ import (
"gorm.io/gorm"
"github.com/pocket-id/pocket-id/backend/internal/model"
+ "github.com/pocket-id/pocket-id/backend/internal/service"
"github.com/pocket-id/pocket-id/backend/internal/storage"
)
func (s *Scheduler) RegisterFileCleanupJobs(ctx context.Context, db *gorm.DB, fileStorage storage.FileStorage) error {
jobs := &FileCleanupJobs{db: db, fileStorage: fileStorage}
- err := s.RegisterJob(ctx, "ClearUnusedDefaultProfilePictures", gocron.DurationJob(24*time.Hour), jobs.clearUnusedDefaultProfilePictures, false)
+ var errs []error
+ errs = append(errs,
+ s.RegisterJob(ctx, "ClearUnusedDefaultProfilePictures", gocron.DurationJob(24*time.Hour), jobs.clearUnusedDefaultProfilePictures, service.RegisterJobOpts{}),
+ )
// Only necessary for file system storage
if fileStorage.Type() == storage.TypeFileSystem {
- err = errors.Join(err, s.RegisterJob(ctx, "ClearOrphanedTempFiles", gocron.DurationJob(12*time.Hour), jobs.clearOrphanedTempFiles, true))
+ errs = append(errs,
+ s.RegisterJob(ctx, "ClearOrphanedTempFiles", gocron.DurationJob(12*time.Hour), jobs.clearOrphanedTempFiles, service.RegisterJobOpts{RunImmediately: true}),
+ )
}
- return err
+ return errors.Join(errs...)
}
type FileCleanupJobs struct {
@@ -68,7 +74,8 @@ func (j *FileCleanupJobs) clearUnusedDefaultProfilePictures(ctx context.Context)
// If these initials aren't used by any user, delete the file
if _, ok := initialsInUse[initials]; !ok {
filePath := path.Join(defaultPicturesDir, filename)
- if err := j.fileStorage.Delete(ctx, filePath); err != nil {
+ err = j.fileStorage.Delete(ctx, filePath)
+ if err != nil {
slog.ErrorContext(ctx, "Failed to delete unused default profile picture", slog.String("path", filePath), slog.Any("error", err))
} else {
filesDeleted++
@@ -95,8 +102,9 @@ func (j *FileCleanupJobs) clearOrphanedTempFiles(ctx context.Context) error {
return nil
}
- if err := j.fileStorage.Delete(ctx, p.Path); err != nil {
- slog.ErrorContext(ctx, "Failed to delete temp file", slog.String("path", p.Path), slog.Any("error", err))
+ rErr := j.fileStorage.Delete(ctx, p.Path)
+ if rErr != nil {
+ slog.ErrorContext(ctx, "Failed to delete temp file", slog.String("path", p.Path), slog.Any("error", rErr))
return nil
}
deleted++
diff --git a/backend/internal/job/geoloite_update_job.go b/backend/internal/job/geoloite_update_job.go
index 65353757..7b163a8d 100644
--- a/backend/internal/job/geoloite_update_job.go
+++ b/backend/internal/job/geoloite_update_job.go
@@ -23,7 +23,7 @@ func (s *Scheduler) RegisterGeoLiteUpdateJobs(ctx context.Context, geoLiteServic
jobs := &GeoLiteUpdateJobs{geoLiteService: geoLiteService}
// Run every 24 hours (and right away)
- return s.RegisterJob(ctx, "UpdateGeoLiteDB", gocron.DurationJob(24*time.Hour), jobs.updateGoeLiteDB, true)
+ return s.RegisterJob(ctx, "UpdateGeoLiteDB", gocron.DurationJob(24*time.Hour), jobs.updateGoeLiteDB, service.RegisterJobOpts{RunImmediately: true})
}
func (j *GeoLiteUpdateJobs) updateGoeLiteDB(ctx context.Context) error {
diff --git a/backend/internal/job/ldap_job.go b/backend/internal/job/ldap_job.go
index 33646860..1547d954 100644
--- a/backend/internal/job/ldap_job.go
+++ b/backend/internal/job/ldap_job.go
@@ -4,8 +4,6 @@ import (
"context"
"time"
- "github.com/go-co-op/gocron/v2"
-
"github.com/pocket-id/pocket-id/backend/internal/service"
)
@@ -17,8 +15,8 @@ type LdapJobs struct {
func (s *Scheduler) RegisterLdapJobs(ctx context.Context, ldapService *service.LdapService, appConfigService *service.AppConfigService) error {
jobs := &LdapJobs{ldapService: ldapService, appConfigService: appConfigService}
- // Register the job to run every hour
- return s.RegisterJob(ctx, "SyncLdap", gocron.DurationJob(time.Hour), jobs.syncLdap, true)
+ // Register the job to run every hour (with some jitter)
+ return s.RegisterJob(ctx, "SyncLdap", jobDefWithJitter(time.Hour), jobs.syncLdap, service.RegisterJobOpts{RunImmediately: true})
}
func (j *LdapJobs) syncLdap(ctx context.Context) error {
diff --git a/backend/internal/job/scheduler.go b/backend/internal/job/scheduler.go
index 2a48c2a8..2ef2019b 100644
--- a/backend/internal/job/scheduler.go
+++ b/backend/internal/job/scheduler.go
@@ -5,9 +5,13 @@ import (
"errors"
"fmt"
"log/slog"
+ "time"
+ backoff "github.com/cenkalti/backoff/v5"
"github.com/go-co-op/gocron/v2"
"github.com/google/uuid"
+
+ "github.com/pocket-id/pocket-id/backend/internal/service"
)
type Scheduler struct {
@@ -33,16 +37,12 @@ func (s *Scheduler) RemoveJob(name string) error {
if job.Name() == name {
err := s.scheduler.RemoveJob(job.ID())
if err != nil {
- errs = append(errs, fmt.Errorf("failed to unqueue job %q with ID %q: %w", name, job.ID().String(), err))
+ errs = append(errs, fmt.Errorf("failed to dequeue job %q with ID %q: %w", name, job.ID().String(), err))
}
}
}
- if len(errs) > 0 {
- return errors.Join(errs...)
- }
-
- return nil
+ return errors.Join(errs...)
}
// Run the scheduler.
@@ -64,7 +64,29 @@ func (s *Scheduler) Run(ctx context.Context) error {
return nil
}
-func (s *Scheduler) RegisterJob(ctx context.Context, name string, def gocron.JobDefinition, job func(ctx context.Context) error, runImmediately bool, extraOptions ...gocron.JobOption) error {
+func (s *Scheduler) RegisterJob(ctx context.Context, name string, def gocron.JobDefinition, jobFn func(ctx context.Context) error, opts service.RegisterJobOpts) error {
+ // If a BackOff strategy is provided, wrap the job with retry logic
+ if opts.BackOff != nil {
+ origJob := jobFn
+ jobFn = func(ctx context.Context) error {
+ _, err := backoff.Retry(
+ ctx,
+ func() (struct{}, error) {
+ return struct{}{}, origJob(ctx)
+ },
+ backoff.WithBackOff(opts.BackOff),
+ backoff.WithNotify(func(err error, d time.Duration) {
+ slog.WarnContext(ctx, "Job failed, retrying",
+ slog.String("name", name),
+ slog.Any("error", err),
+ slog.Duration("retryIn", d),
+ )
+ }),
+ )
+ return err
+ }
+ }
+
jobOptions := []gocron.JobOption{
gocron.WithContext(ctx),
gocron.WithName(name),
@@ -91,13 +113,13 @@ func (s *Scheduler) RegisterJob(ctx context.Context, name string, def gocron.Job
),
}
- if runImmediately {
+ if opts.RunImmediately {
jobOptions = append(jobOptions, gocron.JobOption(gocron.WithStartImmediately()))
}
- jobOptions = append(jobOptions, extraOptions...)
+ jobOptions = append(jobOptions, opts.ExtraOptions...)
- _, err := s.scheduler.NewJob(def, gocron.NewTask(job), jobOptions...)
+ _, err := s.scheduler.NewJob(def, gocron.NewTask(jobFn), jobOptions...)
if err != nil {
return fmt.Errorf("failed to register job %q: %w", name, err)
@@ -105,3 +127,9 @@ func (s *Scheduler) RegisterJob(ctx context.Context, name string, def gocron.Job
return nil
}
+
+func jobDefWithJitter(interval time.Duration) gocron.JobDefinition {
+ const jitter = 5 * time.Minute
+
+ return gocron.DurationRandomJob(interval-jitter, interval+jitter)
+}
diff --git a/backend/internal/job/scim_job.go b/backend/internal/job/scim_job.go
index 1ea8ee96..5c4336f6 100644
--- a/backend/internal/job/scim_job.go
+++ b/backend/internal/job/scim_job.go
@@ -16,8 +16,8 @@ type ScimJobs struct {
func (s *Scheduler) RegisterScimJobs(ctx context.Context, scimService *service.ScimService) error {
jobs := &ScimJobs{scimService: scimService}
- // Register the job to run every hour
- return s.RegisterJob(ctx, "SyncScim", gocron.DurationJob(time.Hour), jobs.SyncScim, true)
+	// Register the job to run every hour (TODO: add jitter via jobDefWithJitter, as was done for SyncLdap)
+ return s.RegisterJob(ctx, "SyncScim", gocron.DurationJob(time.Hour), jobs.SyncScim, service.RegisterJobOpts{RunImmediately: true})
}
func (j *ScimJobs) SyncScim(ctx context.Context) error {
diff --git a/backend/internal/service/api_key_service.go b/backend/internal/service/api_key_service.go
index 3c29d6e9..cb409ec5 100644
--- a/backend/internal/service/api_key_service.go
+++ b/backend/internal/service/api_key_service.go
@@ -3,6 +3,7 @@ package service
import (
"context"
"errors"
+ "fmt"
"time"
datatype "github.com/pocket-id/pocket-id/backend/internal/model/types"
@@ -205,36 +206,33 @@ func (s *ApiKeyService) ListExpiringApiKeys(ctx context.Context, daysAhead int)
}
func (s *ApiKeyService) SendApiKeyExpiringSoonEmail(ctx context.Context, apiKey model.ApiKey) error {
- user := apiKey.User
-
- if user.ID == "" {
- if err := s.db.WithContext(ctx).First(&user, "id = ?", apiKey.UserID).Error; err != nil {
- return err
- }
- }
-
- if user.Email == nil {
+ if apiKey.User.Email == nil {
return &common.UserEmailNotSetError{}
}
err := SendEmail(ctx, s.emailService, email.Address{
- Name: user.FullName(),
- Email: *user.Email,
+ Name: apiKey.User.FullName(),
+ Email: *apiKey.User.Email,
}, ApiKeyExpiringSoonTemplate, &ApiKeyExpiringSoonTemplateData{
ApiKeyName: apiKey.Name,
ExpiresAt: apiKey.ExpiresAt.ToTime(),
- Name: user.FirstName,
+ Name: apiKey.User.FirstName,
})
if err != nil {
- return err
+ return fmt.Errorf("error sending notification email: %w", err)
}
// Mark the API key as having had an expiration email sent
- return s.db.WithContext(ctx).
+ err = s.db.WithContext(ctx).
Model(&model.ApiKey{}).
Where("id = ?", apiKey.ID).
Update("expiration_email_sent", true).
Error
+ if err != nil {
+		return fmt.Errorf("error marking expiration email as sent in database: %w", err)
+ }
+
+ return nil
}
func (s *ApiKeyService) initStaticApiKeyUser(ctx context.Context) (user model.User, err error) {
diff --git a/backend/internal/service/app_lock_service.go b/backend/internal/service/app_lock_service.go
index 339e41cd..f1bd0c2a 100644
--- a/backend/internal/service/app_lock_service.go
+++ b/backend/internal/service/app_lock_service.go
@@ -73,7 +73,10 @@ func (lv *lockValue) Unmarshal(raw string) error {
// Acquire obtains the lock. When force is true, the lock is stolen from any existing owner.
// If the lock is forcefully acquired, it blocks until the previous lock has expired.
func (s *AppLockService) Acquire(ctx context.Context, force bool) (waitUntil time.Time, err error) {
- tx := s.db.Begin()
+ tx := s.db.WithContext(ctx).Begin()
+ if tx.Error != nil {
+ return time.Time{}, fmt.Errorf("begin lock transaction: %w", tx.Error)
+ }
defer func() {
tx.Rollback()
}()
@@ -93,7 +96,8 @@ func (s *AppLockService) Acquire(ctx context.Context, force bool) (waitUntil tim
var prevLock lockValue
if prevLockRaw != "" {
- if err := prevLock.Unmarshal(prevLockRaw); err != nil {
+ err = prevLock.Unmarshal(prevLockRaw)
+ if err != nil {
return time.Time{}, fmt.Errorf("decode existing lock value: %w", err)
}
}
@@ -139,7 +143,8 @@ func (s *AppLockService) Acquire(ctx context.Context, force bool) (waitUntil tim
return time.Time{}, fmt.Errorf("lock acquisition failed: %w", res.Error)
}
- if err := tx.Commit().Error; err != nil {
+ err = tx.Commit().Error
+ if err != nil {
return time.Time{}, fmt.Errorf("commit lock acquisition: %w", err)
}
@@ -174,7 +179,8 @@ func (s *AppLockService) RunRenewal(ctx context.Context) error {
case <-ctx.Done():
return nil
case <-ticker.C:
- if err := s.renew(ctx); err != nil {
+ err := s.renew(ctx)
+ if err != nil {
return fmt.Errorf("renew lock: %w", err)
}
}
@@ -183,33 +189,43 @@ func (s *AppLockService) RunRenewal(ctx context.Context) error {
// Release releases the lock if it is held by this process.
func (s *AppLockService) Release(ctx context.Context) error {
- opCtx, cancel := context.WithTimeout(ctx, 3*time.Second)
- defer cancel()
+ db, err := s.db.DB()
+ if err != nil {
+ return fmt.Errorf("failed to get DB connection: %w", err)
+ }
var query string
switch s.db.Name() {
case "sqlite":
query = `
- DELETE FROM kv
- WHERE key = ?
- AND json_extract(value, '$.lock_id') = ?
- `
+DELETE FROM kv
+WHERE key = ?
+ AND json_extract(value, '$.lock_id') = ?
+`
case "postgres":
query = `
- DELETE FROM kv
- WHERE key = $1
- AND value::json->>'lock_id' = $2
- `
+DELETE FROM kv
+WHERE key = $1
+ AND value::json->>'lock_id' = $2
+`
default:
return fmt.Errorf("unsupported database dialect: %s", s.db.Name())
}
- res := s.db.WithContext(opCtx).Exec(query, lockKey, s.lockID)
- if res.Error != nil {
- return fmt.Errorf("release lock failed: %w", res.Error)
+ opCtx, cancel := context.WithTimeout(ctx, 3*time.Second)
+ defer cancel()
+
+ res, err := db.ExecContext(opCtx, query, lockKey, s.lockID)
+ if err != nil {
+ return fmt.Errorf("release lock failed: %w", err)
}
- if res.RowsAffected == 0 {
+ count, err := res.RowsAffected()
+ if err != nil {
+ return fmt.Errorf("failed to count affected rows: %w", err)
+ }
+
+ if count == 0 {
slog.Warn("Application lock not held by this process, cannot release",
slog.Int64("process_id", s.processID),
slog.String("host_id", s.hostID),
@@ -225,6 +241,11 @@ func (s *AppLockService) Release(ctx context.Context) error {
// renew tries to renew the lock, retrying up to renewRetries times (sleeping 1s between attempts).
func (s *AppLockService) renew(ctx context.Context) error {
+ db, err := s.db.DB()
+ if err != nil {
+ return fmt.Errorf("failed to get DB connection: %w", err)
+ }
+
var lastErr error
for attempt := 1; attempt <= renewRetries; attempt++ {
now := time.Now()
@@ -246,42 +267,56 @@ func (s *AppLockService) renew(ctx context.Context) error {
switch s.db.Name() {
case "sqlite":
query = `
- UPDATE kv
- SET value = ?
- WHERE key = ?
- AND json_extract(value, '$.lock_id') = ?
- AND json_extract(value, '$.expires_at') > ?
- `
+UPDATE kv
+SET value = ?
+WHERE key = ?
+ AND json_extract(value, '$.lock_id') = ?
+ AND json_extract(value, '$.expires_at') > ?
+`
case "postgres":
query = `
- UPDATE kv
- SET value = $1
- WHERE key = $2
- AND value::json->>'lock_id' = $3
- AND ((value::json->>'expires_at')::bigint > $4)
- `
+UPDATE kv
+SET value = $1
+WHERE key = $2
+ AND value::json->>'lock_id' = $3
+ AND ((value::json->>'expires_at')::bigint > $4)
+`
default:
return fmt.Errorf("unsupported database dialect: %s", s.db.Name())
}
opCtx, cancel := context.WithTimeout(ctx, 3*time.Second)
- res := s.db.WithContext(opCtx).Exec(query, raw, lockKey, s.lockID, nowUnix)
+ res, err := db.ExecContext(opCtx, query, raw, lockKey, s.lockID, nowUnix)
cancel()
- switch {
- case res.Error != nil:
- lastErr = fmt.Errorf("lock renewal failed: %w", res.Error)
- case res.RowsAffected == 0:
- // Must be after checking res.Error
- return ErrLockLost
- default:
+ // Query succeeded, but may have updated 0 rows
+ if err == nil {
+ count, err := res.RowsAffected()
+ if err != nil {
+ return fmt.Errorf("failed to count affected rows: %w", err)
+ }
+
+ // If no rows were updated, we lost the lock
+ if count == 0 {
+ return ErrLockLost
+ }
+
+ // All good
slog.Debug("Renewed application lock",
slog.Int64("process_id", s.processID),
slog.String("host_id", s.hostID),
+ slog.Duration("duration", time.Since(now)),
)
return nil
}
+ // If we're here, we have an error that can be retried
+ slog.Debug("Application lock renewal attempt failed",
+ slog.Any("error", err),
+ slog.Duration("duration", time.Since(now)),
+ )
+ lastErr = fmt.Errorf("lock renewal failed: %w", err)
+
// Wait before next attempt or cancel if context is done
if attempt < renewRetries {
select {
diff --git a/backend/internal/service/app_lock_service_test.go b/backend/internal/service/app_lock_service_test.go
index 95b22f51..8f829dff 100644
--- a/backend/internal/service/app_lock_service_test.go
+++ b/backend/internal/service/app_lock_service_test.go
@@ -49,6 +49,23 @@ func readLockValue(t *testing.T, db *gorm.DB) lockValue {
return value
}
+func lockDatabaseForWrite(t *testing.T, db *gorm.DB) *gorm.DB {
+ t.Helper()
+
+ tx := db.Begin()
+ require.NoError(t, tx.Error)
+
+ // Keep a write transaction open to block other queries.
+ err := tx.Exec(
+ `INSERT INTO kv (key, value) VALUES (?, ?) ON CONFLICT(key) DO NOTHING`,
+ lockKey,
+ `{"expires_at":0}`,
+ ).Error
+ require.NoError(t, err)
+
+ return tx
+}
+
func TestAppLockServiceAcquire(t *testing.T) {
t.Run("creates new lock when none exists", func(t *testing.T) {
db := testutils.NewDatabaseForTest(t)
@@ -99,6 +116,66 @@ func TestAppLockServiceAcquire(t *testing.T) {
require.Equal(t, service.hostID, stored.HostID)
require.Greater(t, stored.ExpiresAt, time.Now().Unix())
})
+
+ t.Run("force acquisition returns wait duration when stealing active lock", func(t *testing.T) {
+ db := testutils.NewDatabaseForTest(t)
+ service := newTestAppLockService(t, db)
+
+ existing := lockValue{
+ ProcessID: 99,
+ HostID: "other-host",
+ LockID: "other-lock-id",
+ ExpiresAt: time.Now().Add(ttl).Unix(),
+ }
+ insertLock(t, db, existing)
+
+ waitUntil, err := service.Acquire(context.Background(), true)
+ require.NoError(t, err)
+ require.WithinDuration(t, time.Unix(existing.ExpiresAt, 0), waitUntil, time.Second)
+ })
+
+ t.Run("force acquisition does not wait when lock id is unchanged", func(t *testing.T) {
+ db := testutils.NewDatabaseForTest(t)
+ service := newTestAppLockService(t, db)
+
+ insertLock(t, db, lockValue{
+ ProcessID: 99,
+ HostID: "other-host",
+ LockID: service.lockID,
+ ExpiresAt: time.Now().Add(ttl).Unix(),
+ })
+
+ waitUntil, err := service.Acquire(context.Background(), true)
+ require.NoError(t, err)
+ require.True(t, waitUntil.IsZero())
+ })
+
+ t.Run("returns error when existing lock value is invalid JSON", func(t *testing.T) {
+ db := testutils.NewDatabaseForTest(t)
+ service := newTestAppLockService(t, db)
+
+ raw := "this-is-not-json"
+ err := db.Create(&model.KV{Key: lockKey, Value: &raw}).Error
+ require.NoError(t, err)
+
+ _, err = service.Acquire(context.Background(), false)
+ require.ErrorContains(t, err, "decode existing lock value")
+ })
+
+ t.Run("returns context deadline exceeded when database is locked", func(t *testing.T) {
+ db := testutils.NewDatabaseForTest(t)
+ service := newTestAppLockService(t, db)
+
+ tx := lockDatabaseForWrite(t, db)
+ defer tx.Rollback()
+
+ ctx, cancel := context.WithTimeout(context.Background(), 150*time.Millisecond)
+ defer cancel()
+
+ _, err := service.Acquire(ctx, false)
+ require.ErrorIs(t, err, context.DeadlineExceeded)
+ require.ErrorContains(t, err, "begin lock transaction")
+ })
}
func TestAppLockServiceRelease(t *testing.T) {
@@ -134,6 +211,24 @@ func TestAppLockServiceRelease(t *testing.T) {
stored := readLockValue(t, db)
require.Equal(t, existing, stored)
})
+
+ t.Run("returns context deadline exceeded when database is locked", func(t *testing.T) {
+ db := testutils.NewDatabaseForTest(t)
+ service := newTestAppLockService(t, db)
+
+ _, err := service.Acquire(context.Background(), false)
+ require.NoError(t, err)
+
+ tx := lockDatabaseForWrite(t, db)
+ defer tx.Rollback()
+
+ ctx, cancel := context.WithTimeout(context.Background(), 150*time.Millisecond)
+ defer cancel()
+
+ err = service.Release(ctx)
+ require.ErrorIs(t, err, context.DeadlineExceeded)
+ require.ErrorContains(t, err, "release lock failed")
+ })
}
func TestAppLockServiceRenew(t *testing.T) {
@@ -186,4 +281,21 @@ func TestAppLockServiceRenew(t *testing.T) {
err = service.renew(context.Background())
require.ErrorIs(t, err, ErrLockLost)
})
+
+ t.Run("returns context deadline exceeded when database is locked", func(t *testing.T) {
+ db := testutils.NewDatabaseForTest(t)
+ service := newTestAppLockService(t, db)
+
+ _, err := service.Acquire(context.Background(), false)
+ require.NoError(t, err)
+
+ tx := lockDatabaseForWrite(t, db)
+ defer tx.Rollback()
+
+ ctx, cancel := context.WithTimeout(context.Background(), 150*time.Millisecond)
+ defer cancel()
+
+ err = service.renew(ctx)
+ require.ErrorIs(t, err, context.DeadlineExceeded)
+ })
}
diff --git a/backend/internal/service/e2etest_service.go b/backend/internal/service/e2etest_service.go
index 7df15681..6cc76b8a 100644
--- a/backend/internal/service/e2etest_service.go
+++ b/backend/internal/service/e2etest_service.go
@@ -258,7 +258,7 @@ func (s *TestService) SeedDatabase(baseURL string) error {
Nonce: "nonce",
ExpiresAt: datatype.DateTime(time.Now().Add(1 * time.Hour)),
UserID: users[1].ID,
- ClientID: oidcClients[2].ID,
+ ClientID: oidcClients[3].ID,
},
}
for _, authCode := range authCodes {
diff --git a/backend/internal/service/email_service.go b/backend/internal/service/email_service.go
index 05affa5b..7dffa08c 100644
--- a/backend/internal/service/email_service.go
+++ b/backend/internal/service/email_service.go
@@ -150,7 +150,8 @@ func SendEmail[V any](ctx context.Context, srv *EmailService, toEmail email.Addr
}
// Send the email
- if err := srv.sendEmailContent(client, toEmail, c); err != nil {
+ err = srv.sendEmailContent(client, toEmail, c)
+ if err != nil {
return fmt.Errorf("send email content: %w", err)
}
diff --git a/backend/internal/service/ldap_service.go b/backend/internal/service/ldap_service.go
index ac6042b2..05bd1e4c 100644
--- a/backend/internal/service/ldap_service.go
+++ b/backend/internal/service/ldap_service.go
@@ -35,6 +35,7 @@ type LdapService struct {
userService *UserService
groupService *UserGroupService
fileStorage storage.FileStorage
+ clientFactory func() (ldapClient, error)
}
type savePicture struct {
@@ -43,8 +44,33 @@ type savePicture struct {
picture string
}
+type ldapDesiredUser struct {
+ ldapID string
+ input dto.UserCreateDto
+ picture string
+}
+
+type ldapDesiredGroup struct {
+ ldapID string
+ input dto.UserGroupCreateDto
+ memberUsernames []string
+}
+
+type ldapDesiredState struct {
+ users []ldapDesiredUser
+ userIDs map[string]struct{}
+ groups []ldapDesiredGroup
+ groupIDs map[string]struct{}
+}
+
+type ldapClient interface {
+ Search(searchRequest *ldap.SearchRequest) (*ldap.SearchResult, error)
+ Bind(username, password string) error
+ Close() error
+}
+
func NewLdapService(db *gorm.DB, httpClient *http.Client, appConfigService *AppConfigService, userService *UserService, groupService *UserGroupService, fileStorage storage.FileStorage) *LdapService {
- return &LdapService{
+ service := &LdapService{
db: db,
httpClient: httpClient,
appConfigService: appConfigService,
@@ -52,9 +78,12 @@ func NewLdapService(db *gorm.DB, httpClient *http.Client, appConfigService *AppC
groupService: groupService,
fileStorage: fileStorage,
}
+
+ service.clientFactory = service.createClient
+ return service
}
-func (s *LdapService) createClient() (*ldap.Conn, error) {
+func (s *LdapService) createClient() (ldapClient, error) {
dbConfig := s.appConfigService.GetDbConfig()
if !dbConfig.LdapEnabled.IsTrue() {
@@ -79,24 +108,33 @@ func (s *LdapService) createClient() (*ldap.Conn, error) {
func (s *LdapService) SyncAll(ctx context.Context) error {
// Setup LDAP connection
- client, err := s.createClient()
+ client, err := s.clientFactory()
if err != nil {
return fmt.Errorf("failed to create LDAP client: %w", err)
}
defer client.Close()
- // Start a transaction
- tx := s.db.Begin()
- defer func() {
- tx.Rollback()
- }()
 + // First, we fetch all users and groups from LDAP, which is our "desired state"
+ desiredState, err := s.fetchDesiredState(ctx, client)
+ if err != nil {
+ return fmt.Errorf("failed to fetch LDAP state: %w", err)
+ }
- savePictures, deleteFiles, err := s.SyncUsers(ctx, tx, client)
+ // Start a transaction
+ tx := s.db.WithContext(ctx).Begin()
+ if tx.Error != nil {
+ return fmt.Errorf("failed to begin database transaction: %w", tx.Error)
+ }
+ defer tx.Rollback()
+
+ // Reconcile users
+ savePictures, deleteFiles, err := s.reconcileUsers(ctx, tx, desiredState.users, desiredState.userIDs)
if err != nil {
return fmt.Errorf("failed to sync users: %w", err)
}
- err = s.SyncGroups(ctx, tx, client)
+ // Reconcile groups
+ err = s.reconcileGroups(ctx, tx, desiredState.groups, desiredState.groupIDs)
if err != nil {
return fmt.Errorf("failed to sync groups: %w", err)
}
@@ -129,10 +167,31 @@ func (s *LdapService) SyncAll(ctx context.Context) error {
return nil
}
-//nolint:gocognit
-func (s *LdapService) SyncGroups(ctx context.Context, tx *gorm.DB, client *ldap.Conn) error {
+func (s *LdapService) fetchDesiredState(ctx context.Context, client ldapClient) (ldapDesiredState, error) {
+ // Fetch users first so we can use their DNs when resolving group members
+ users, userIDs, usernamesByDN, err := s.fetchUsersFromLDAP(ctx, client)
+ if err != nil {
+ return ldapDesiredState{}, err
+ }
+
+ // Then fetch groups to complete the desired LDAP state snapshot
+ groups, groupIDs, err := s.fetchGroupsFromLDAP(ctx, client, usernamesByDN)
+ if err != nil {
+ return ldapDesiredState{}, err
+ }
+
+ return ldapDesiredState{
+ users: users,
+ userIDs: userIDs,
+ groups: groups,
+ groupIDs: groupIDs,
+ }, nil
+}
+
+func (s *LdapService) fetchGroupsFromLDAP(ctx context.Context, client ldapClient, usernamesByDN map[string]string) (desiredGroups []ldapDesiredGroup, ldapGroupIDs map[string]struct{}, err error) {
dbConfig := s.appConfigService.GetDbConfig()
+ // Query LDAP for all groups we want to manage
searchAttrs := []string{
dbConfig.LdapAttributeGroupName.Value,
dbConfig.LdapAttributeGroupUniqueIdentifier.Value,
@@ -149,90 +208,42 @@ func (s *LdapService) SyncGroups(ctx context.Context, tx *gorm.DB, client *ldap.
)
result, err := client.Search(searchReq)
if err != nil {
- return fmt.Errorf("failed to query LDAP: %w", err)
+ return nil, nil, fmt.Errorf("failed to query LDAP groups: %w", err)
}
- // Create a mapping for groups that exist
- ldapGroupIDs := make(map[string]struct{}, len(result.Entries))
+ // Build the in-memory desired state for groups
+ ldapGroupIDs = make(map[string]struct{}, len(result.Entries))
+ desiredGroups = make([]ldapDesiredGroup, 0, len(result.Entries))
for _, value := range result.Entries {
- ldapId := convertLdapIdToString(value.GetAttributeValue(dbConfig.LdapAttributeGroupUniqueIdentifier.Value))
+ ldapID := convertLdapIdToString(value.GetAttributeValue(dbConfig.LdapAttributeGroupUniqueIdentifier.Value))
// Skip groups without a valid LDAP ID
- if ldapId == "" {
+ if ldapID == "" {
slog.Warn("Skipping LDAP group without a valid unique identifier", slog.String("attribute", dbConfig.LdapAttributeGroupUniqueIdentifier.Value))
continue
}
- ldapGroupIDs[ldapId] = struct{}{}
-
- // Try to find the group in the database
- var databaseGroup model.UserGroup
- err = tx.
- WithContext(ctx).
- Where("ldap_id = ?", ldapId).
- First(&databaseGroup).
- Error
- if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) {
- // This could error with ErrRecordNotFound and we want to ignore that here
- return fmt.Errorf("failed to query for LDAP group ID '%s': %w", ldapId, err)
- }
+ ldapGroupIDs[ldapID] = struct{}{}
// Get group members and add to the correct Group
groupMembers := value.GetAttributeValues(dbConfig.LdapAttributeGroupMember.Value)
- membersUserId := make([]string, 0, len(groupMembers))
+ memberUsernames := make([]string, 0, len(groupMembers))
for _, member := range groupMembers {
- username := getDNProperty(dbConfig.LdapAttributeUserUsername.Value, member)
-
- // If username extraction fails, try to query LDAP directly for the user
+ username := s.resolveGroupMemberUsername(ctx, client, member, usernamesByDN)
if username == "" {
- // Query LDAP to get the user by their DN
- userSearchReq := ldap.NewSearchRequest(
- member,
- ldap.ScopeBaseObject,
- 0, 0, 0, false,
- "(objectClass=*)",
- []string{dbConfig.LdapAttributeUserUsername.Value, dbConfig.LdapAttributeUserUniqueIdentifier.Value},
- []ldap.Control{},
- )
-
- userResult, err := client.Search(userSearchReq)
- if err != nil || len(userResult.Entries) == 0 {
- slog.WarnContext(ctx, "Could not resolve group member DN", slog.String("member", member), slog.Any("error", err))
- continue
- }
-
- username = userResult.Entries[0].GetAttributeValue(dbConfig.LdapAttributeUserUsername.Value)
- if username == "" {
- slog.WarnContext(ctx, "Could not extract username from group member DN", slog.String("member", member))
- continue
- }
- }
-
- username = norm.NFC.String(username)
-
- var databaseUser model.User
- err = tx.
- WithContext(ctx).
- Where("username = ? AND ldap_id IS NOT NULL", username).
- First(&databaseUser).
- Error
- if errors.Is(err, gorm.ErrRecordNotFound) {
- // The user collides with a non-LDAP user, so we skip it
continue
- } else if err != nil {
- return fmt.Errorf("failed to query for existing user '%s': %w", username, err)
}
- membersUserId = append(membersUserId, databaseUser.ID)
+ memberUsernames = append(memberUsernames, username)
}
syncGroup := dto.UserGroupCreateDto{
Name: value.GetAttributeValue(dbConfig.LdapAttributeGroupName.Value),
FriendlyName: value.GetAttributeValue(dbConfig.LdapAttributeGroupName.Value),
- LdapID: ldapId,
+ LdapID: ldapID,
}
- dto.Normalize(syncGroup)
+ dto.Normalize(&syncGroup)
err = syncGroup.Validate()
if err != nil {
@@ -240,64 +251,20 @@ func (s *LdapService) SyncGroups(ctx context.Context, tx *gorm.DB, client *ldap.
continue
}
- if databaseGroup.ID == "" {
- newGroup, err := s.groupService.createInternal(ctx, syncGroup, tx)
- if err != nil {
- return fmt.Errorf("failed to create group '%s': %w", syncGroup.Name, err)
- }
-
- _, err = s.groupService.updateUsersInternal(ctx, newGroup.ID, membersUserId, tx)
- if err != nil {
- return fmt.Errorf("failed to sync users for group '%s': %w", syncGroup.Name, err)
- }
- } else {
- _, err = s.groupService.updateInternal(ctx, databaseGroup.ID, syncGroup, true, tx)
- if err != nil {
- return fmt.Errorf("failed to update group '%s': %w", syncGroup.Name, err)
- }
-
- _, err = s.groupService.updateUsersInternal(ctx, databaseGroup.ID, membersUserId, tx)
- if err != nil {
- return fmt.Errorf("failed to sync users for group '%s': %w", syncGroup.Name, err)
- }
- }
+ desiredGroups = append(desiredGroups, ldapDesiredGroup{
+ ldapID: ldapID,
+ input: syncGroup,
+ memberUsernames: memberUsernames,
+ })
}
- // Get all LDAP groups from the database
- var ldapGroupsInDb []model.UserGroup
- err = tx.
- WithContext(ctx).
- Find(&ldapGroupsInDb, "ldap_id IS NOT NULL").
- Select("ldap_id").
- Error
- if err != nil {
- return fmt.Errorf("failed to fetch groups from database: %w", err)
- }
-
- // Delete groups that no longer exist in LDAP
- for _, group := range ldapGroupsInDb {
- if _, exists := ldapGroupIDs[*group.LdapID]; exists {
- continue
- }
-
- err = tx.
- WithContext(ctx).
- Delete(&model.UserGroup{}, "ldap_id = ?", group.LdapID).
- Error
- if err != nil {
- return fmt.Errorf("failed to delete group '%s': %w", group.Name, err)
- }
-
- slog.Info("Deleted group", slog.String("group", group.Name))
- }
-
- return nil
+ return desiredGroups, ldapGroupIDs, nil
}
-//nolint:gocognit
-func (s *LdapService) SyncUsers(ctx context.Context, tx *gorm.DB, client *ldap.Conn) (savePictures []savePicture, deleteFiles []string, err error) {
+func (s *LdapService) fetchUsersFromLDAP(ctx context.Context, client ldapClient) (desiredUsers []ldapDesiredUser, ldapUserIDs map[string]struct{}, usernamesByDN map[string]string, err error) {
dbConfig := s.appConfigService.GetDbConfig()
+ // Query LDAP for all users we want to manage
searchAttrs := []string{
"memberOf",
"sn",
@@ -323,50 +290,29 @@ func (s *LdapService) SyncUsers(ctx context.Context, tx *gorm.DB, client *ldap.C
result, err := client.Search(searchReq)
if err != nil {
- return nil, nil, fmt.Errorf("failed to query LDAP: %w", err)
+ return nil, nil, nil, fmt.Errorf("failed to query LDAP users: %w", err)
}
- // Create a mapping for users that exist
- ldapUserIDs := make(map[string]struct{}, len(result.Entries))
- savePictures = make([]savePicture, 0, len(result.Entries))
+ // Build the in-memory desired state for users and a DN lookup for group membership resolution
+ ldapUserIDs = make(map[string]struct{}, len(result.Entries))
+ usernamesByDN = make(map[string]string, len(result.Entries))
+ desiredUsers = make([]ldapDesiredUser, 0, len(result.Entries))
for _, value := range result.Entries {
- ldapId := convertLdapIdToString(value.GetAttributeValue(dbConfig.LdapAttributeUserUniqueIdentifier.Value))
+ username := norm.NFC.String(value.GetAttributeValue(dbConfig.LdapAttributeUserUsername.Value))
+ if normalizedDN := normalizeLDAPDN(value.DN); normalizedDN != "" && username != "" {
+ usernamesByDN[normalizedDN] = username
+ }
+
+ ldapID := convertLdapIdToString(value.GetAttributeValue(dbConfig.LdapAttributeUserUniqueIdentifier.Value))
// Skip users without a valid LDAP ID
- if ldapId == "" {
+ if ldapID == "" {
slog.Warn("Skipping LDAP user without a valid unique identifier", slog.String("attribute", dbConfig.LdapAttributeUserUniqueIdentifier.Value))
continue
}
- ldapUserIDs[ldapId] = struct{}{}
-
- // Get the user from the database
- var databaseUser model.User
- err = tx.
- WithContext(ctx).
- Where("ldap_id = ?", ldapId).
- First(&databaseUser).
- Error
-
- // If a user is found (even if disabled), enable them since they're now back in LDAP
- if databaseUser.ID != "" && databaseUser.Disabled {
- err = tx.
- WithContext(ctx).
- Model(&model.User{}).
- Where("id = ?", databaseUser.ID).
- Update("disabled", false).
- Error
-
- if err != nil {
- return nil, nil, fmt.Errorf("failed to enable user %s: %w", databaseUser.Username, err)
- }
- }
-
- if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) {
- // This could error with ErrRecordNotFound and we want to ignore that here
- return nil, nil, fmt.Errorf("failed to query for LDAP user ID '%s': %w", ldapId, err)
- }
+ ldapUserIDs[ldapID] = struct{}{}
// Check if user is admin by checking if they are in the admin group
isAdmin := false
@@ -385,14 +331,14 @@ func (s *LdapService) SyncUsers(ctx context.Context, tx *gorm.DB, client *ldap.C
LastName: value.GetAttributeValue(dbConfig.LdapAttributeUserLastName.Value),
DisplayName: value.GetAttributeValue(dbConfig.LdapAttributeUserDisplayName.Value),
IsAdmin: isAdmin,
- LdapID: ldapId,
+ LdapID: ldapID,
}
if newUser.DisplayName == "" {
newUser.DisplayName = strings.TrimSpace(newUser.FirstName + " " + newUser.LastName)
}
- dto.Normalize(newUser)
+ dto.Normalize(&newUser)
err = newUser.Validate()
if err != nil {
@@ -400,53 +346,201 @@ func (s *LdapService) SyncUsers(ctx context.Context, tx *gorm.DB, client *ldap.C
continue
}
- userID := databaseUser.ID
- if databaseUser.ID == "" {
- createdUser, err := s.userService.createUserInternal(ctx, newUser, true, tx)
- if errors.Is(err, &common.AlreadyInUseError{}) {
- slog.Warn("Skipping creating LDAP user", slog.String("username", newUser.Username), slog.Any("error", err))
+ desiredUsers = append(desiredUsers, ldapDesiredUser{
+ ldapID: ldapID,
+ input: newUser,
+ picture: value.GetAttributeValue(dbConfig.LdapAttributeUserProfilePicture.Value),
+ })
+ }
+
+ return desiredUsers, ldapUserIDs, usernamesByDN, nil
+}
+
+func (s *LdapService) resolveGroupMemberUsername(ctx context.Context, client ldapClient, member string, usernamesByDN map[string]string) string {
+ dbConfig := s.appConfigService.GetDbConfig()
+
+ // First try the DN cache we built while loading users
+ username, exists := usernamesByDN[normalizeLDAPDN(member)]
+ if exists && username != "" {
+ return username
+ }
+
+ // Then try to extract the username directly from the DN
+ username = getDNProperty(dbConfig.LdapAttributeUserUsername.Value, member)
+ if username != "" {
+ return norm.NFC.String(username)
+ }
+
+ // As a fallback, query LDAP for the referenced entry
+ userSearchReq := ldap.NewSearchRequest(
+ member,
+ ldap.ScopeBaseObject,
+ 0, 0, 0, false,
+ "(objectClass=*)",
+ []string{dbConfig.LdapAttributeUserUsername.Value},
+ []ldap.Control{},
+ )
+
+ userResult, err := client.Search(userSearchReq)
+ if err != nil || len(userResult.Entries) == 0 {
+ slog.WarnContext(ctx, "Could not resolve group member DN", slog.String("member", member), slog.Any("error", err))
+ return ""
+ }
+
+ username = userResult.Entries[0].GetAttributeValue(dbConfig.LdapAttributeUserUsername.Value)
+ if username == "" {
+ slog.WarnContext(ctx, "Could not extract username from group member DN", slog.String("member", member))
+ return ""
+ }
+
+ return norm.NFC.String(username)
+}
+
+func (s *LdapService) reconcileGroups(ctx context.Context, tx *gorm.DB, desiredGroups []ldapDesiredGroup, ldapGroupIDs map[string]struct{}) error {
+ // Load the current LDAP-managed state from the database
+ ldapGroupsInDB, ldapGroupsByID, err := s.loadLDAPGroupsInDB(ctx, tx)
+ if err != nil {
+ return fmt.Errorf("failed to fetch groups from database: %w", err)
+ }
+
+ _, _, ldapUsersByUsername, err := s.loadLDAPUsersInDB(ctx, tx)
+ if err != nil {
+ return fmt.Errorf("failed to fetch users from database: %w", err)
+ }
+
+ // Apply creates and updates to match the desired LDAP group state
+ for _, desiredGroup := range desiredGroups {
+ memberUserIDs := make([]string, 0, len(desiredGroup.memberUsernames))
+ for _, username := range desiredGroup.memberUsernames {
+ databaseUser, exists := ldapUsersByUsername[username]
+ if !exists {
+ // The user collides with a non-LDAP user or was skipped during user sync, so we ignore it
continue
- } else if err != nil {
- return nil, nil, fmt.Errorf("error creating user '%s': %w", newUser.Username, err)
- }
- userID = createdUser.ID
- } else {
- _, err = s.userService.updateUserInternal(ctx, databaseUser.ID, newUser, false, true, tx)
- if errors.Is(err, &common.AlreadyInUseError{}) {
- slog.Warn("Skipping updating LDAP user", slog.String("username", newUser.Username), slog.Any("error", err))
- continue
- } else if err != nil {
- return nil, nil, fmt.Errorf("error updating user '%s': %w", newUser.Username, err)
}
+
+ memberUserIDs = append(memberUserIDs, databaseUser.ID)
}
- // Save profile picture
- pictureString := value.GetAttributeValue(dbConfig.LdapAttributeUserProfilePicture.Value)
- if pictureString != "" {
- // Storage operations must be executed outside of a transaction
- savePictures = append(savePictures, savePicture{
- userID: databaseUser.ID,
- username: userID,
- picture: pictureString,
- })
+ databaseGroup := ldapGroupsByID[desiredGroup.ldapID]
+ if databaseGroup.ID == "" {
+ newGroup, err := s.groupService.createInternal(ctx, desiredGroup.input, tx)
+ if err != nil {
+ return fmt.Errorf("failed to create group '%s': %w", desiredGroup.input.Name, err)
+ }
+ ldapGroupsByID[desiredGroup.ldapID] = newGroup
+
+ _, err = s.groupService.updateUsersInternal(ctx, newGroup.ID, memberUserIDs, tx)
+ if err != nil {
+ return fmt.Errorf("failed to sync users for group '%s': %w", desiredGroup.input.Name, err)
+ }
+ continue
+ }
+
+ _, err = s.groupService.updateInternal(ctx, databaseGroup.ID, desiredGroup.input, true, tx)
+ if err != nil {
+ return fmt.Errorf("failed to update group '%s': %w", desiredGroup.input.Name, err)
+ }
+
+ _, err = s.groupService.updateUsersInternal(ctx, databaseGroup.ID, memberUserIDs, tx)
+ if err != nil {
+ return fmt.Errorf("failed to sync users for group '%s': %w", desiredGroup.input.Name, err)
}
}
- // Get all LDAP users from the database
- var ldapUsersInDb []model.User
- err = tx.
- WithContext(ctx).
- Find(&ldapUsersInDb, "ldap_id IS NOT NULL").
- Select("id, username, ldap_id, disabled").
- Error
+ // Delete groups that are no longer present in LDAP
+ for _, group := range ldapGroupsInDB {
+ if group.LdapID == nil {
+ continue
+ }
+
+ if _, exists := ldapGroupIDs[*group.LdapID]; exists {
+ continue
+ }
+
+ err = tx.
+ WithContext(ctx).
+ Delete(&model.UserGroup{}, "ldap_id = ?", *group.LdapID).
+ Error
+ if err != nil {
+ return fmt.Errorf("failed to delete group '%s': %w", group.Name, err)
+ }
+
+ slog.Info("Deleted group", slog.String("group", group.Name))
+ }
+
+ return nil
+}
+
+//nolint:gocognit
+func (s *LdapService) reconcileUsers(ctx context.Context, tx *gorm.DB, desiredUsers []ldapDesiredUser, ldapUserIDs map[string]struct{}) (savePictures []savePicture, deleteFiles []string, err error) {
+ dbConfig := s.appConfigService.GetDbConfig()
+
+ // Load the current LDAP-managed state from the database
+ ldapUsersInDB, ldapUsersByID, _, err := s.loadLDAPUsersInDB(ctx, tx)
if err != nil {
return nil, nil, fmt.Errorf("failed to fetch users from database: %w", err)
}
- // Mark users as disabled or delete users that no longer exist in LDAP
- deleteFiles = make([]string, 0, len(ldapUserIDs))
- for _, user := range ldapUsersInDb {
- // Skip if the user ID exists in the fetched LDAP results
+ // Apply creates and updates to match the desired LDAP user state
+ savePictures = make([]savePicture, 0, len(desiredUsers))
+
+ for _, desiredUser := range desiredUsers {
+ databaseUser := ldapUsersByID[desiredUser.ldapID]
+
+ // If a user is found (even if disabled), enable them since they're now back in LDAP.
+ if databaseUser.ID != "" && databaseUser.Disabled {
+ err = tx.
+ WithContext(ctx).
+ Model(&model.User{}).
+ Where("id = ?", databaseUser.ID).
+ Update("disabled", false).
+ Error
+ if err != nil {
+ return nil, nil, fmt.Errorf("failed to enable user %s: %w", databaseUser.Username, err)
+ }
+
+ databaseUser.Disabled = false
+ ldapUsersByID[desiredUser.ldapID] = databaseUser
+ }
+
+ userID := databaseUser.ID
+ if databaseUser.ID == "" {
+ createdUser, err := s.userService.createUserInternal(ctx, desiredUser.input, true, tx)
+ if errors.Is(err, &common.AlreadyInUseError{}) {
+ slog.Warn("Skipping creating LDAP user", slog.String("username", desiredUser.input.Username), slog.Any("error", err))
+ continue
+ } else if err != nil {
+ return nil, nil, fmt.Errorf("error creating user '%s': %w", desiredUser.input.Username, err)
+ }
+
+ userID = createdUser.ID
+ ldapUsersByID[desiredUser.ldapID] = createdUser
+ } else {
+ _, err = s.userService.updateUserInternal(ctx, databaseUser.ID, desiredUser.input, false, true, tx)
+ if errors.Is(err, &common.AlreadyInUseError{}) {
+ slog.Warn("Skipping updating LDAP user", slog.String("username", desiredUser.input.Username), slog.Any("error", err))
+ continue
+ } else if err != nil {
+ return nil, nil, fmt.Errorf("error updating user '%s': %w", desiredUser.input.Username, err)
+ }
+ }
+
+ if desiredUser.picture != "" {
+ savePictures = append(savePictures, savePicture{
+ userID: userID,
+ username: desiredUser.input.Username,
+ picture: desiredUser.picture,
+ })
+ }
+ }
+
+ // Disable or delete users that are no longer present in LDAP
+ deleteFiles = make([]string, 0, len(ldapUsersInDB))
+ for _, user := range ldapUsersInDB {
+ if user.LdapID == nil {
+ continue
+ }
+
if _, exists := ldapUserIDs[*user.LdapID]; exists {
continue
}
@@ -458,29 +552,73 @@ func (s *LdapService) SyncUsers(ctx context.Context, tx *gorm.DB, client *ldap.C
}
slog.Info("Disabled user", slog.String("username", user.Username))
- } else {
- err = s.userService.deleteUserInternal(ctx, tx, user.ID, true)
- if err != nil {
- target := &common.LdapUserUpdateError{}
- if errors.As(err, &target) {
- return nil, nil, fmt.Errorf("failed to delete user %s: LDAP user must be disabled before deletion", user.Username)
- }
- return nil, nil, fmt.Errorf("failed to delete user %s: %w", user.Username, err)
- }
-
- slog.Info("Deleted user", slog.String("username", user.Username))
-
- // Storage operations must be executed outside of a transaction
- deleteFiles = append(deleteFiles, path.Join("profile-pictures", user.ID+".png"))
+ continue
}
+
+ err = s.userService.deleteUserInternal(ctx, tx, user.ID, true)
+ if err != nil {
+ target := &common.LdapUserUpdateError{}
+ if errors.As(err, &target) {
+ return nil, nil, fmt.Errorf("failed to delete user %s: LDAP user must be disabled before deletion", user.Username)
+ }
+ return nil, nil, fmt.Errorf("failed to delete user %s: %w", user.Username, err)
+ }
+
+ slog.Info("Deleted user", slog.String("username", user.Username))
+ deleteFiles = append(deleteFiles, path.Join("profile-pictures", user.ID+".png"))
}
return savePictures, deleteFiles, nil
}
+func (s *LdapService) loadLDAPUsersInDB(ctx context.Context, tx *gorm.DB) (users []model.User, byLdapID map[string]model.User, byUsername map[string]model.User, err error) {
+ // Load all LDAP-managed users and index them by LDAP ID and by username
+ err = tx.
+ WithContext(ctx).
+ Select("id, username, ldap_id, disabled").
+ Where("ldap_id IS NOT NULL").
+ Find(&users).
+ Error
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
+ byLdapID = make(map[string]model.User, len(users))
+ byUsername = make(map[string]model.User, len(users))
+ for _, user := range users {
+ byLdapID[*user.LdapID] = user
+ byUsername[user.Username] = user
+ }
+
+ return users, byLdapID, byUsername, nil
+}
+
+func (s *LdapService) loadLDAPGroupsInDB(ctx context.Context, tx *gorm.DB) ([]model.UserGroup, map[string]model.UserGroup, error) {
+ var groups []model.UserGroup
+
+ // Load all LDAP-managed groups and index them by LDAP ID
+ err := tx.
+ WithContext(ctx).
+ Select("id, name, ldap_id").
+ Where("ldap_id IS NOT NULL").
+ Find(&groups).
+ Error
+ if err != nil {
+ return nil, nil, err
+ }
+
+ groupsByID := make(map[string]model.UserGroup, len(groups))
+ for _, group := range groups {
+ groupsByID[*group.LdapID] = group
+ }
+
+ return groups, groupsByID, nil
+}
+
func (s *LdapService) saveProfilePicture(parentCtx context.Context, userId string, pictureString string) error {
var reader io.ReadSeeker
+ // Accept either a URL, a base64-encoded payload, or raw binary data
_, err := url.ParseRequestURI(pictureString)
if err == nil {
ctx, cancel := context.WithTimeout(parentCtx, 15*time.Second)
@@ -522,6 +660,31 @@ func (s *LdapService) saveProfilePicture(parentCtx context.Context, userId strin
return nil
}
+// normalizeLDAPDN returns a canonical lowercase form of a DN for use as a map key.
+// Different LDAP servers may format the same DN with varying attribute type casing (e.g. "CN=" vs "cn=") or extra whitespace (e.g. "dc=example, dc=com").
+// Without normalization, cache lookups in usernamesByDN would miss when a member attribute value uses a different format than the DN returned in the search entry
+//
+// ldap.ParseDN is used instead of simple lowercasing because it correctly handles multi-valued RDNs (joined with "+") and strips inter-component whitespace.
+// If parsing fails for any reason, we fall back to a simple lowercase+trim.
+func normalizeLDAPDN(dn string) string {
+ parsed, err := ldap.ParseDN(dn)
+ if err != nil {
+ return strings.ToLower(strings.TrimSpace(dn))
+ }
+
+ // Reconstruct the DN in a canonical form: lowercase type=lowercase value, with RDN components separated by "," and multi-value attributes by "+"
+ parts := make([]string, 0, len(parsed.RDNs))
+ for _, rdn := range parsed.RDNs {
+ attrs := make([]string, 0, len(rdn.Attributes))
+ for _, attr := range rdn.Attributes {
+ attrs = append(attrs, strings.ToLower(attr.Type)+"="+strings.ToLower(attr.Value))
+ }
+ parts = append(parts, strings.Join(attrs, "+"))
+ }
+
+ return strings.Join(parts, ",")
+}
+
// getDNProperty returns the value of a property from a LDAP identifier
// See: https://learn.microsoft.com/en-us/previous-versions/windows/desktop/ldap/distinguished-names
func getDNProperty(property string, str string) string {
diff --git a/backend/internal/service/ldap_service_test.go b/backend/internal/service/ldap_service_test.go
index 1a049bfe..22553f8b 100644
--- a/backend/internal/service/ldap_service_test.go
+++ b/backend/internal/service/ldap_service_test.go
@@ -1,9 +1,286 @@
package service
import (
+ "net/http"
"testing"
+
+ "github.com/go-ldap/ldap/v3"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "gorm.io/gorm"
+
+ "github.com/pocket-id/pocket-id/backend/internal/model"
+ "github.com/pocket-id/pocket-id/backend/internal/storage"
+ testutils "github.com/pocket-id/pocket-id/backend/internal/utils/testing"
)
+type fakeLDAPClient struct {
+ searchFn func(searchRequest *ldap.SearchRequest) (*ldap.SearchResult, error)
+}
+
+func (c *fakeLDAPClient) Search(searchRequest *ldap.SearchRequest) (*ldap.SearchResult, error) {
+ if c.searchFn == nil {
+ return nil, nil
+ }
+
+ return c.searchFn(searchRequest)
+}
+
+func (c *fakeLDAPClient) Bind(_, _ string) error {
+ return nil
+}
+
+func (c *fakeLDAPClient) Close() error {
+ return nil
+}
+
+func TestLdapServiceSyncAllReconcilesUsersAndGroups(t *testing.T) {
+ service, db := newTestLdapService(t, newFakeLDAPClient(
+ ldapSearchResult(
+ ldapEntry("uid=alice,ou=people,dc=example,dc=com", map[string][]string{
+ "entryUUID": {"u-alice"},
+ "uid": {"alice"},
+ "mail": {"alice@example.com"},
+ "givenName": {"Alice"},
+ "sn": {"Jones"},
+ "displayName": {""},
+ "memberOf": {"cn=admins,ou=groups,dc=example,dc=com"},
+ }),
+ ldapEntry("uid=bob,ou=people,dc=example,dc=com", map[string][]string{
+ "entryUUID": {"u-bob"},
+ "uid": {"bob"},
+ "mail": {"bob@example.com"},
+ "givenName": {"Bob"},
+ "sn": {"Brown"},
+ "displayName": {""},
+ }),
+ ),
+ ldapSearchResult(
+ ldapEntry("cn=team,ou=groups,dc=example,dc=com", map[string][]string{
+ "entryUUID": {"g-team"},
+ "cn": {"team"},
+ "member": {
+ "UID=Alice, OU=People, DC=example, DC=com",
+ "uid=bob, ou=people, dc=example, dc=com",
+ },
+ }),
+ ),
+ ))
+
+ aliceLdapID := "u-alice"
+ missingLdapID := "u-missing"
+ teamLdapID := "g-team"
+ oldGroupLdapID := "g-old"
+
+ require.NoError(t, db.Create(&model.User{
+ Username: "alice-old",
+ Email: new("alice-old@example.com"),
+ EmailVerified: true,
+ FirstName: "Old",
+ LastName: "Name",
+ DisplayName: "Old Name",
+ LdapID: &aliceLdapID,
+ Disabled: true,
+ }).Error)
+
+ require.NoError(t, db.Create(&model.User{
+ Username: "missing",
+ Email: new("missing@example.com"),
+ EmailVerified: true,
+ FirstName: "Missing",
+ LastName: "User",
+ DisplayName: "Missing User",
+ LdapID: &missingLdapID,
+ }).Error)
+
+ require.NoError(t, db.Create(&model.UserGroup{
+ Name: "team-old",
+ FriendlyName: "team-old",
+ LdapID: &teamLdapID,
+ }).Error)
+
+ require.NoError(t, db.Create(&model.UserGroup{
+ Name: "old-group",
+ FriendlyName: "old-group",
+ LdapID: &oldGroupLdapID,
+ }).Error)
+
+ require.NoError(t, service.SyncAll(t.Context()))
+
+ var alice model.User
+ require.NoError(t, db.First(&alice, "ldap_id = ?", aliceLdapID).Error)
+ assert.Equal(t, "alice", alice.Username)
+ assert.Equal(t, new("alice@example.com"), alice.Email)
+ assert.Equal(t, "Alice", alice.FirstName)
+ assert.Equal(t, "Jones", alice.LastName)
+ assert.Equal(t, "Alice Jones", alice.DisplayName)
+ assert.True(t, alice.IsAdmin)
+ assert.False(t, alice.Disabled)
+
+ var bob model.User
+ require.NoError(t, db.First(&bob, "ldap_id = ?", "u-bob").Error)
+ assert.Equal(t, "bob", bob.Username)
+ assert.Equal(t, "Bob Brown", bob.DisplayName)
+
+ var missing model.User
+ require.NoError(t, db.First(&missing, "ldap_id = ?", missingLdapID).Error)
+ assert.True(t, missing.Disabled)
+
+ var oldGroupCount int64
+ require.NoError(t, db.Model(&model.UserGroup{}).Where("ldap_id = ?", oldGroupLdapID).Count(&oldGroupCount).Error)
+ assert.Zero(t, oldGroupCount)
+
+ var team model.UserGroup
+ require.NoError(t, db.Preload("Users").First(&team, "ldap_id = ?", teamLdapID).Error)
+ assert.Equal(t, "team", team.Name)
+ assert.Equal(t, "team", team.FriendlyName)
+ assert.ElementsMatch(t, []string{"alice", "bob"}, usernames(team.Users))
+}
+
+func TestLdapServiceSyncAllHandlesDuplicateLDAPIDsInSingleRun(t *testing.T) {
+ service, db := newTestLdapService(t, newFakeLDAPClient(
+ ldapSearchResult(
+ ldapEntry("uid=alice,ou=people,dc=example,dc=com", map[string][]string{
+ "entryUUID": {"u-dup"},
+ "uid": {"alice"},
+ "mail": {"alice@example.com"},
+ "givenName": {"Alice"},
+ "sn": {"Doe"},
+ "displayName": {"Alice Doe"},
+ }),
+ ldapEntry("uid=alice,ou=people,dc=example,dc=com", map[string][]string{
+ "entryUUID": {"u-dup"},
+ "uid": {"alice"},
+ "mail": {"alice@example.com"},
+ "givenName": {"Alicia"},
+ "sn": {"Doe"},
+ "displayName": {"Alicia Doe"},
+ }),
+ ),
+ ldapSearchResult(
+ ldapEntry("cn=team,ou=groups,dc=example,dc=com", map[string][]string{
+ "entryUUID": {"g-dup"},
+ "cn": {"team"},
+ "member": {"uid=alice,ou=people,dc=example,dc=com"},
+ }),
+ ldapEntry("cn=team,ou=groups,dc=example,dc=com", map[string][]string{
+ "entryUUID": {"g-dup"},
+ "cn": {"team-renamed"},
+ "member": {"uid=alice,ou=people,dc=example,dc=com"},
+ }),
+ ),
+ ))
+
+ require.NoError(t, service.SyncAll(t.Context()))
+
+ var users []model.User
+ require.NoError(t, db.Find(&users, "ldap_id = ?", "u-dup").Error)
+ require.Len(t, users, 1)
+ assert.Equal(t, "alice", users[0].Username)
+ assert.Equal(t, "Alicia", users[0].FirstName)
+ assert.Equal(t, "Alicia Doe", users[0].DisplayName)
+
+ var groups []model.UserGroup
+ require.NoError(t, db.Preload("Users").Find(&groups, "ldap_id = ?", "g-dup").Error)
+ require.Len(t, groups, 1)
+ assert.Equal(t, "team-renamed", groups[0].Name)
+ assert.Equal(t, "team-renamed", groups[0].FriendlyName)
+ assert.ElementsMatch(t, []string{"alice"}, usernames(groups[0].Users))
+}
+
+func newTestLdapService(t *testing.T, client ldapClient) (*LdapService, *gorm.DB) {
+ t.Helper()
+
+ db := testutils.NewDatabaseForTest(t)
+
+ fileStorage, err := storage.NewDatabaseStorage(db)
+ require.NoError(t, err)
+
+ appConfig := NewTestAppConfigService(&model.AppConfig{
+ RequireUserEmail: model.AppConfigVariable{Value: "false"},
+ LdapEnabled: model.AppConfigVariable{Value: "true"},
+ LdapBase: model.AppConfigVariable{Value: "dc=example,dc=com"},
+ LdapUserSearchFilter: model.AppConfigVariable{Value: "(objectClass=person)"},
+ LdapUserGroupSearchFilter: model.AppConfigVariable{Value: "(objectClass=groupOfNames)"},
+ LdapAttributeUserUniqueIdentifier: model.AppConfigVariable{Value: "entryUUID"},
+ LdapAttributeUserUsername: model.AppConfigVariable{Value: "uid"},
+ LdapAttributeUserEmail: model.AppConfigVariable{Value: "mail"},
+ LdapAttributeUserFirstName: model.AppConfigVariable{Value: "givenName"},
+ LdapAttributeUserLastName: model.AppConfigVariable{Value: "sn"},
+ LdapAttributeUserDisplayName: model.AppConfigVariable{Value: "displayName"},
+ LdapAttributeUserProfilePicture: model.AppConfigVariable{Value: "jpegPhoto"},
+ LdapAttributeGroupMember: model.AppConfigVariable{Value: "member"},
+ LdapAttributeGroupUniqueIdentifier: model.AppConfigVariable{Value: "entryUUID"},
+ LdapAttributeGroupName: model.AppConfigVariable{Value: "cn"},
+ LdapAdminGroupName: model.AppConfigVariable{Value: "admins"},
+ LdapSoftDeleteUsers: model.AppConfigVariable{Value: "true"},
+ })
+
+ groupService := NewUserGroupService(db, appConfig, nil)
+ userService := NewUserService(
+ db,
+ nil,
+ nil,
+ nil,
+ appConfig,
+ NewCustomClaimService(db),
+ NewAppImagesService(map[string]string{}, fileStorage),
+ nil,
+ fileStorage,
+ )
+
+ service := NewLdapService(db, &http.Client{}, appConfig, userService, groupService, fileStorage)
+ service.clientFactory = func() (ldapClient, error) {
+ return client, nil
+ }
+
+ return service, db
+}
+
+func newFakeLDAPClient(userResult, groupResult *ldap.SearchResult) ldapClient {
+ return &fakeLDAPClient{
+ searchFn: func(searchRequest *ldap.SearchRequest) (*ldap.SearchResult, error) {
+ switch searchRequest.Filter {
+ case "(objectClass=person)":
+ return userResult, nil
+ case "(objectClass=groupOfNames)":
+ return groupResult, nil
+ default:
+ return &ldap.SearchResult{}, nil
+ }
+ },
+ }
+}
+
+func ldapSearchResult(entries ...*ldap.Entry) *ldap.SearchResult {
+ return &ldap.SearchResult{Entries: entries}
+}
+
+func ldapEntry(dn string, attrs map[string][]string) *ldap.Entry {
+ entry := &ldap.Entry{
+ DN: dn,
+ Attributes: make([]*ldap.EntryAttribute, 0, len(attrs)),
+ }
+
+ for name, values := range attrs {
+ entry.Attributes = append(entry.Attributes, &ldap.EntryAttribute{
+ Name: name,
+ Values: values,
+ })
+ }
+
+ return entry
+}
+
+func usernames(users []model.User) []string {
+ result := make([]string, 0, len(users))
+ for _, user := range users {
+ result = append(result, user.Username)
+ }
+
+ return result
+}
+
func TestGetDNProperty(t *testing.T) {
tests := []struct {
name string
@@ -64,10 +341,58 @@ func TestGetDNProperty(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result := getDNProperty(tt.property, tt.dn)
- if result != tt.expectedResult {
- t.Errorf("getDNProperty(%q, %q) = %q, want %q",
- tt.property, tt.dn, result, tt.expectedResult)
- }
+ assert.Equalf(t, tt.expectedResult, result, "getDNProperty(%q, %q)", tt.property, tt.dn)
+ })
+ }
+}
+
+func TestNormalizeLDAPDN(t *testing.T) {
+ tests := []struct {
+ name string
+ input string
+ expected string
+ }{
+ {
+ name: "already normalized",
+ input: "cn=alice,dc=example,dc=com",
+ expected: "cn=alice,dc=example,dc=com",
+ },
+ {
+ name: "uppercase attribute types",
+ input: "CN=Alice,DC=example,DC=com",
+ expected: "cn=alice,dc=example,dc=com",
+ },
+ {
+ name: "spaces after commas",
+ input: "cn=alice, dc=example, dc=com",
+ expected: "cn=alice,dc=example,dc=com",
+ },
+ {
+ name: "uppercase types and spaces",
+ input: "CN=Alice, DC=example, DC=com",
+ expected: "cn=alice,dc=example,dc=com",
+ },
+ {
+ name: "multi-valued RDN",
+ input: "cn=alice+uid=a123,dc=example,dc=com",
+ expected: "cn=alice+uid=a123,dc=example,dc=com",
+ },
+ {
+ name: "invalid DN falls back to lowercase+trim",
+ input: " NOT A VALID DN ",
+ expected: "not a valid dn",
+ },
+ {
+ name: "empty string",
+ input: "",
+ expected: "",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ result := normalizeLDAPDN(tt.input)
+ assert.Equalf(t, tt.expected, result, "normalizeLDAPDN(%q)", tt.input)
})
}
}
@@ -98,9 +423,7 @@ func TestConvertLdapIdToString(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := convertLdapIdToString(tt.input)
- if got != tt.expected {
- t.Errorf("Expected %q, got %q", tt.expected, got)
- }
+ assert.Equal(t, tt.expected, got)
})
}
}
diff --git a/backend/internal/service/oidc_service.go b/backend/internal/service/oidc_service.go
index 1d04c8d1..9d0b7ff0 100644
--- a/backend/internal/service/oidc_service.go
+++ b/backend/internal/service/oidc_service.go
@@ -404,7 +404,7 @@ func (s *OidcService) createTokenFromAuthorizationCode(ctx context.Context, inpu
}
}
- if authorizationCodeMetaData.ClientID != input.ClientID && authorizationCodeMetaData.ExpiresAt.ToTime().Before(time.Now()) {
+ if authorizationCodeMetaData.ClientID != input.ClientID || authorizationCodeMetaData.ExpiresAt.ToTime().Before(time.Now()) {
return CreatedTokens{}, &common.OidcInvalidAuthorizationCodeError{}
}
diff --git a/backend/internal/service/one_time_access_service.go b/backend/internal/service/one_time_access_service.go
index a9d80d80..1b84f498 100644
--- a/backend/internal/service/one_time_access_service.go
+++ b/backend/internal/service/one_time_access_service.go
@@ -79,7 +79,7 @@ func (s *OneTimeAccessService) requestOneTimeAccessEmailInternal(ctx context.Con
tx.Rollback()
}()
- user, err := s.userService.GetUser(ctx, userID)
+ user, err := s.userService.getUserInternal(ctx, userID, tx)
if err != nil {
return nil, err
}
@@ -131,8 +131,32 @@ func (s *OneTimeAccessService) requestOneTimeAccessEmailInternal(ctx context.Con
}
func (s *OneTimeAccessService) CreateOneTimeAccessToken(ctx context.Context, userID string, ttl time.Duration) (token string, err error) {
- token, _, err = s.createOneTimeAccessTokenInternal(ctx, userID, ttl, false, s.db)
- return token, err
+ tx := s.db.Begin()
+ defer func() {
+ tx.Rollback()
+ }()
+
+ // Load the user to ensure it exists
+ _, err = s.userService.getUserInternal(ctx, userID, tx)
+ if errors.Is(err, gorm.ErrRecordNotFound) {
+ return "", &common.UserNotFoundError{}
+ } else if err != nil {
+ return "", err
+ }
+
+ // Create the one-time access token
+ token, _, err = s.createOneTimeAccessTokenInternal(ctx, userID, ttl, false, tx)
+ if err != nil {
+ return "", err
+ }
+
+ // Commit
+ err = tx.Commit().Error
+ if err != nil {
+ return "", err
+ }
+
+ return token, nil
}
func (s *OneTimeAccessService) createOneTimeAccessTokenInternal(ctx context.Context, userID string, ttl time.Duration, withDeviceToken bool, tx *gorm.DB) (token string, deviceToken *string, err error) {
diff --git a/backend/internal/service/scheduler.go b/backend/internal/service/scheduler.go
new file mode 100644
index 00000000..b53f195b
--- /dev/null
+++ b/backend/internal/service/scheduler.go
@@ -0,0 +1,25 @@
+package service
+
+import (
+ "context"
+
+ backoff "github.com/cenkalti/backoff/v5"
+ "github.com/go-co-op/gocron/v2"
+)
+
+// RegisterJobOpts holds optional configuration for registering a scheduled job.
+type RegisterJobOpts struct {
+ // RunImmediately runs the job immediately after registration.
+ RunImmediately bool
+ // ExtraOptions are additional gocron job options.
+ ExtraOptions []gocron.JobOption
+ // BackOff is an optional backoff strategy. If non-nil, the job will be wrapped
+ // with automatic retry logic using the provided backoff on transient failures.
+ BackOff backoff.BackOff
+}
+
+// Scheduler is an interface for registering and managing background jobs.
+type Scheduler interface {
+ RegisterJob(ctx context.Context, name string, def gocron.JobDefinition, job func(ctx context.Context) error, opts RegisterJobOpts) error
+ RemoveJob(name string) error
+}
diff --git a/backend/internal/service/scim_service.go b/backend/internal/service/scim_service.go
index 976e2cd5..f21f8be7 100644
--- a/backend/internal/service/scim_service.go
+++ b/backend/internal/service/scim_service.go
@@ -34,11 +34,6 @@ const scimErrorBodyLimit = 4096
type scimSyncAction int
-type Scheduler interface {
- RegisterJob(ctx context.Context, name string, def gocron.JobDefinition, job func(ctx context.Context) error, runImmediately bool, extraOptions ...gocron.JobOption) error
- RemoveJob(name string) error
-}
-
const (
scimActionNone scimSyncAction = iota
scimActionCreated
@@ -149,7 +144,7 @@ func (s *ScimService) ScheduleSync() {
err := s.scheduler.RegisterJob(
context.Background(), jobName,
- gocron.OneTimeJob(gocron.OneTimeJobStartDateTime(start)), s.SyncAll, false)
+ gocron.OneTimeJob(gocron.OneTimeJobStartDateTime(start)), s.SyncAll, RegisterJobOpts{})
if err != nil {
slog.Error("Failed to schedule SCIM sync", slog.Any("error", err))
@@ -168,7 +163,8 @@ func (s *ScimService) SyncAll(ctx context.Context) error {
errs = append(errs, ctx.Err())
break
}
- if err := s.SyncServiceProvider(ctx, provider.ID); err != nil {
+ err = s.SyncServiceProvider(ctx, provider.ID)
+ if err != nil {
errs = append(errs, fmt.Errorf("failed to sync SCIM provider %s: %w", provider.ID, err))
}
}
@@ -210,26 +206,20 @@ func (s *ScimService) SyncServiceProvider(ctx context.Context, serviceProviderID
}
var errs []error
- var userStats scimSyncStats
- var groupStats scimSyncStats
// Sync users first, so that groups can reference them
- if stats, err := s.syncUsers(ctx, provider, users, &userResources); err != nil {
- errs = append(errs, err)
- userStats = stats
- } else {
- userStats = stats
- }
-
- stats, err := s.syncGroups(ctx, provider, groups, groupResources.Resources, userResources.Resources)
+ userStats, err := s.syncUsers(ctx, provider, users, &userResources)
+ if err != nil {
+ errs = append(errs, err)
+ }
+
+ groupStats, err := s.syncGroups(ctx, provider, groups, groupResources.Resources, userResources.Resources)
if err != nil {
errs = append(errs, err)
- groupStats = stats
- } else {
- groupStats = stats
}
if len(errs) > 0 {
+ err = errors.Join(errs...)
slog.WarnContext(ctx, "SCIM sync completed with errors",
slog.String("provider_id", provider.ID),
slog.Int("error_count", len(errs)),
@@ -240,12 +230,14 @@ func (s *ScimService) SyncServiceProvider(ctx context.Context, serviceProviderID
slog.Int("groups_updated", groupStats.Updated),
slog.Int("groups_deleted", groupStats.Deleted),
slog.Duration("duration", time.Since(start)),
+ slog.Any("error", err),
)
- return errors.Join(errs...)
+ return err
}
provider.LastSyncedAt = new(datatype.DateTime(time.Now()))
- if err := s.db.WithContext(ctx).Save(&provider).Error; err != nil {
+ err = s.db.WithContext(ctx).Save(&provider).Error
+ if err != nil {
return err
}
@@ -273,7 +265,7 @@ func (s *ScimService) syncUsers(
// Update or create users
for _, u := range users {
- existing := getResourceByExternalID[dto.ScimUser](u.ID, resourceList.Resources)
+ existing := getResourceByExternalID(u.ID, resourceList.Resources)
action, created, err := s.syncUser(ctx, provider, u, existing)
if created != nil && existing == nil {
@@ -434,7 +426,7 @@ func (s *ScimService) syncGroup(
// Prepare group members
members := make([]dto.ScimGroupMember, len(group.Users))
for i, user := range group.Users {
- userResource := getResourceByExternalID[dto.ScimUser](user.ID, userResources)
+ userResource := getResourceByExternalID(user.ID, userResources)
if userResource == nil {
// Groups depend on user IDs already being provisioned
return scimActionNone, fmt.Errorf("cannot sync group %s: user %s is not provisioned in SCIM provider", group.ID, user.ID)
diff --git a/backend/internal/service/user_group_service.go b/backend/internal/service/user_group_service.go
index e55c9085..0e37a5ac 100644
--- a/backend/internal/service/user_group_service.go
+++ b/backend/internal/service/user_group_service.go
@@ -96,7 +96,10 @@ func (s *UserGroupService) Delete(ctx context.Context, id string) error {
return err
}
- s.scimService.ScheduleSync()
+ if s.scimService != nil {
+ s.scimService.ScheduleSync()
+ }
+
return nil
}
@@ -126,7 +129,10 @@ func (s *UserGroupService) createInternal(ctx context.Context, input dto.UserGro
return model.UserGroup{}, err
}
- s.scimService.ScheduleSync()
+ if s.scimService != nil {
+ s.scimService.ScheduleSync()
+ }
+
return group, nil
}
@@ -175,7 +181,10 @@ func (s *UserGroupService) updateInternal(ctx context.Context, id string, input
return model.UserGroup{}, err
}
- s.scimService.ScheduleSync()
+ if s.scimService != nil {
+ s.scimService.ScheduleSync()
+ }
+
return group, nil
}
@@ -238,7 +247,10 @@ func (s *UserGroupService) updateUsersInternal(ctx context.Context, id string, u
return model.UserGroup{}, err
}
- s.scimService.ScheduleSync()
+ if s.scimService != nil {
+ s.scimService.ScheduleSync()
+ }
+
return group, nil
}
@@ -315,6 +327,9 @@ func (s *UserGroupService) UpdateAllowedOidcClient(ctx context.Context, id strin
return model.UserGroup{}, err
}
- s.scimService.ScheduleSync()
+ if s.scimService != nil {
+ s.scimService.ScheduleSync()
+ }
+
return group, nil
}
diff --git a/backend/internal/service/user_service.go b/backend/internal/service/user_service.go
index f0ad2369..f3ec4fbc 100644
--- a/backend/internal/service/user_service.go
+++ b/backend/internal/service/user_service.go
@@ -225,7 +225,10 @@ func (s *UserService) deleteUserInternal(ctx context.Context, tx *gorm.DB, userI
return fmt.Errorf("failed to delete user: %w", err)
}
- s.scimService.ScheduleSync()
+ if s.scimService != nil {
+ s.scimService.ScheduleSync()
+ }
+
return nil
}
@@ -310,7 +313,10 @@ func (s *UserService) createUserInternal(ctx context.Context, input dto.UserCrea
}
}
- s.scimService.ScheduleSync()
+ if s.scimService != nil {
+ s.scimService.ScheduleSync()
+ }
+
return user, nil
}
@@ -456,7 +462,10 @@ func (s *UserService) updateUserInternal(ctx context.Context, userID string, upd
return user, err
}
- s.scimService.ScheduleSync()
+ if s.scimService != nil {
+ s.scimService.ScheduleSync()
+ }
+
return user, nil
}
@@ -515,7 +524,10 @@ func (s *UserService) UpdateUserGroups(ctx context.Context, id string, userGroup
return model.User{}, err
}
- s.scimService.ScheduleSync()
+ if s.scimService != nil {
+ s.scimService.ScheduleSync()
+ }
+
return user, nil
}
@@ -576,7 +588,10 @@ func (s *UserService) disableUserInternal(ctx context.Context, tx *gorm.DB, user
return err
}
- s.scimService.ScheduleSync()
+ if s.scimService != nil {
+ s.scimService.ScheduleSync()
+ }
+
return nil
}
diff --git a/backend/resources/email-templates/api-key-expiring-soon_html.tmpl b/backend/resources/email-templates/api-key-expiring-soon_html.tmpl
index 8b52a5a0..b9b3bb5c 100644
--- a/backend/resources/email-templates/api-key-expiring-soon_html.tmpl
+++ b/backend/resources/email-templates/api-key-expiring-soon_html.tmpl
@@ -1 +1 @@
-{{define "root"}}
 | {{.AppName}} |
|
API Key Expiring Soon | Warning |
Hello {{.Data.Name}}, This is a reminder that your API key {{.Data.APIKeyName}} will expire on {{.Data.ExpiresAt.Format "2006-01-02 15:04:05 MST"}}. Please generate a new API key if you need continued access. |
|
{{end}}
\ No newline at end of file
+{{define "root"}} | {{.AppName}} |
|
API Key Expiring Soon | Warning |
Hello {{.Data.Name}}, This is a reminder that your API key {{.Data.ApiKeyName}} will expire on {{.Data.ExpiresAt.Format "2006-01-02 15:04:05 MST"}}. Please generate a new API key if you need continued access. |
|
{{end}}
\ No newline at end of file
diff --git a/backend/resources/email-templates/api-key-expiring-soon_text.tmpl b/backend/resources/email-templates/api-key-expiring-soon_text.tmpl
index ae7ba74b..247969d5 100644
--- a/backend/resources/email-templates/api-key-expiring-soon_text.tmpl
+++ b/backend/resources/email-templates/api-key-expiring-soon_text.tmpl
@@ -6,6 +6,6 @@ API KEY EXPIRING SOON
Warning
Hello {{.Data.Name}},
-This is a reminder that your API key {{.Data.APIKeyName}} will expire on {{.Data.ExpiresAt.Format "2006-01-02 15:04:05 MST"}}.
+This is a reminder that your API key {{.Data.ApiKeyName}} will expire on {{.Data.ExpiresAt.Format "2006-01-02 15:04:05 MST"}}.
Please generate a new API key if you need continued access.{{end}}
\ No newline at end of file
diff --git a/backend/resources/migrations/postgres/20260304090200_indexes.down.sql b/backend/resources/migrations/postgres/20260304090200_indexes.down.sql
new file mode 100644
index 00000000..f8e19576
--- /dev/null
+++ b/backend/resources/migrations/postgres/20260304090200_indexes.down.sql
@@ -0,0 +1 @@
+-- No-op: rollback intentionally keeps the indexes; the up migration uses IF NOT EXISTS, so re-applying is safe
\ No newline at end of file
diff --git a/backend/resources/migrations/postgres/20260304090200_indexes.up.sql b/backend/resources/migrations/postgres/20260304090200_indexes.up.sql
new file mode 100644
index 00000000..b288f044
--- /dev/null
+++ b/backend/resources/migrations/postgres/20260304090200_indexes.up.sql
@@ -0,0 +1,6 @@
+CREATE INDEX IF NOT EXISTS idx_webauthn_sessions_expires_at ON webauthn_sessions (expires_at);
+CREATE INDEX IF NOT EXISTS idx_one_time_access_tokens_expires_at ON one_time_access_tokens (expires_at);
+CREATE INDEX IF NOT EXISTS idx_oidc_authorization_codes_expires_at ON oidc_authorization_codes (expires_at);
+CREATE INDEX IF NOT EXISTS idx_oidc_refresh_tokens_expires_at ON oidc_refresh_tokens (expires_at);
+CREATE INDEX IF NOT EXISTS idx_reauthentication_tokens_expires_at ON reauthentication_tokens (expires_at);
+CREATE INDEX IF NOT EXISTS idx_email_verification_tokens_expires_at ON email_verification_tokens (expires_at);
\ No newline at end of file
diff --git a/backend/resources/migrations/sqlite/20260304090200_indexes.down.sql b/backend/resources/migrations/sqlite/20260304090200_indexes.down.sql
new file mode 100644
index 00000000..f8e19576
--- /dev/null
+++ b/backend/resources/migrations/sqlite/20260304090200_indexes.down.sql
@@ -0,0 +1 @@
+-- No-op: rollback intentionally keeps the indexes; the up migration uses IF NOT EXISTS, so re-applying is safe
\ No newline at end of file
diff --git a/backend/resources/migrations/sqlite/20260304090200_indexes.up.sql b/backend/resources/migrations/sqlite/20260304090200_indexes.up.sql
new file mode 100644
index 00000000..f8a32142
--- /dev/null
+++ b/backend/resources/migrations/sqlite/20260304090200_indexes.up.sql
@@ -0,0 +1,12 @@
+PRAGMA foreign_keys= OFF;
+BEGIN;
+
+CREATE INDEX IF NOT EXISTS idx_webauthn_sessions_expires_at ON webauthn_sessions (expires_at);
+CREATE INDEX IF NOT EXISTS idx_one_time_access_tokens_expires_at ON one_time_access_tokens (expires_at);
+CREATE INDEX IF NOT EXISTS idx_oidc_authorization_codes_expires_at ON oidc_authorization_codes (expires_at);
+CREATE INDEX IF NOT EXISTS idx_oidc_refresh_tokens_expires_at ON oidc_refresh_tokens (expires_at);
+CREATE INDEX IF NOT EXISTS idx_reauthentication_tokens_expires_at ON reauthentication_tokens (expires_at);
+CREATE INDEX IF NOT EXISTS idx_email_verification_tokens_expires_at ON email_verification_tokens (expires_at);
+
+COMMIT;
+PRAGMA foreign_keys=ON;
diff --git a/email-templates/emails/api-key-expiring-soon.tsx b/email-templates/emails/api-key-expiring-soon.tsx
index 6ddcc327..2a33987d 100644
--- a/email-templates/emails/api-key-expiring-soon.tsx
+++ b/email-templates/emails/api-key-expiring-soon.tsx
@@ -40,7 +40,7 @@ ApiKeyExpiringEmail.TemplateProps = {
...sharedTemplateProps,
data: {
name: "{{.Data.Name}}",
- apiKeyName: "{{.Data.APIKeyName}}",
+ apiKeyName: "{{.Data.ApiKeyName}}",
expiresAt: '{{.Data.ExpiresAt.Format "2006-01-02 15:04:05 MST"}}',
},
};
diff --git a/frontend/messages/cs.json b/frontend/messages/cs.json
index 8bf78ad7..643e230c 100644
--- a/frontend/messages/cs.json
+++ b/frontend/messages/cs.json
@@ -365,6 +365,7 @@
"enter_code_displayed_in_previous_step": "Zadejte kód, který byl zobrazen v předchozím kroku.",
"authorize": "Autorizovat",
"federated_client_credentials": "Údaje o klientovi ve federaci",
+ "federated_client_credentials_description": "Federované klientské přihlašovací údaje umožňují ověřování klientů OIDC bez správy dlouhodobých tajných klíčů. Využívají tokeny JWT vydané třetími stranami pro klientská tvrzení, např. tokeny identity pracovního zatížení.",
"add_federated_client_credential": "Přidat údaje federovaného klienta",
"add_another_federated_client_credential": "Přidat dalšího federovaného klienta",
"oidc_allowed_group_count": "Počet povolených skupin",
diff --git a/frontend/messages/da.json b/frontend/messages/da.json
index 5d2b3336..baa18220 100644
--- a/frontend/messages/da.json
+++ b/frontend/messages/da.json
@@ -356,7 +356,7 @@
"login_code_email_success": "Loginkoden er sendt til brugeren.",
"send_email": "Send e-mail",
"show_code": "Vis kode",
- "callback_url_description": "URL(er) angivet af din klient. Tilføjes automatisk, hvis feltet efterlades tomt. Jokertegn understøttes.",
+ "callback_url_description": "URL(er) angivet af din klient. Tilføjes automatisk, hvis feltet efterlades tomt. Wildcards understøttes.",
"logout_callback_url_description": "URL(er) angivet af din klient til logout. Wildcards understøttes.",
"api_key_expiration": "Udløb af API-nøgle",
"send_an_email_to_the_user_when_their_api_key_is_about_to_expire": "Send en e-mail til brugeren, når deres API-nøgle er ved at udløbe.",
@@ -365,6 +365,7 @@
"enter_code_displayed_in_previous_step": "Indtast koden, der blev vist i det forrige trin.",
"authorize": "Godkend",
"federated_client_credentials": "Federated klientlegitimationsoplysninger",
+ "federated_client_credentials_description": "Federerede klientlegitimationsoplysninger gør det muligt at autentificere OIDC-klienter uden at skulle administrere langvarige hemmeligheder. De udnytter JWT-tokens udstedt af tredjepartsmyndigheder til klientpåstande, f.eks. identitetstokens for arbejdsbelastning.",
"add_federated_client_credential": "Tilføj federated klientlegitimation",
"add_another_federated_client_credential": "Tilføj endnu en federated klientlegitimation",
"oidc_allowed_group_count": "Tilladt antal grupper",
@@ -445,7 +446,7 @@
"no_apps_available": "Ingen apps tilgængelige",
"contact_your_administrator_for_app_access": "Kontakt din administrator for at få adgang til applikationer.",
"launch": "Start",
- "client_launch_url": "Kundens lancerings-URL",
+ "client_launch_url": "Start-URL til klient",
"client_launch_url_description": "Den URL, der åbnes, når en bruger starter appen fra siden Mine apps.",
"client_name_description": "Navnet på den klient, der vises i Pocket ID-brugergrænsefladen.",
"revoke_access": "Tilbagekald adgang",
diff --git a/frontend/messages/de.json b/frontend/messages/de.json
index 79885fcc..64d1eb48 100644
--- a/frontend/messages/de.json
+++ b/frontend/messages/de.json
@@ -365,6 +365,7 @@
"enter_code_displayed_in_previous_step": "Gib den Code ein, der im vorherigen Schritt angezeigt wurde.",
"authorize": "Autorisieren",
"federated_client_credentials": "Federated Client Credentials",
+ "federated_client_credentials_description": "Mit föderierten Client-Anmeldeinfos kann man OIDC-Clients authentifizieren, ohne sich um langlebige Geheimnisse kümmern zu müssen. Sie nutzen JWT-Token, die von Drittanbietern für Client-Assertions ausgestellt werden, z. B. Workload-Identitätstoken.",
"add_federated_client_credential": "Föderierte Client-Anmeldeinfos hinzufügen",
"add_another_federated_client_credential": "Weitere Anmeldeinformationen für einen Verbundclient hinzufügen",
"oidc_allowed_group_count": "Erlaubte Gruppenanzahl",
diff --git a/frontend/messages/es.json b/frontend/messages/es.json
index ac6fdd78..13f863be 100644
--- a/frontend/messages/es.json
+++ b/frontend/messages/es.json
@@ -365,6 +365,7 @@
"enter_code_displayed_in_previous_step": "Introduce el código que se mostró en el paso anterior.",
"authorize": "Autorizar",
"federated_client_credentials": "Credenciales de cliente federadas",
+ "federated_client_credentials_description": "Las credenciales de cliente federadas permiten autenticar clientes OIDC sin gestionar secretos de larga duración. Aprovechan los tokens JWT emitidos por autoridades externas para las afirmaciones de los clientes, por ejemplo, tokens de identidad de carga de trabajo.",
"add_federated_client_credential": "Añadir credenciales de cliente federado",
"add_another_federated_client_credential": "Añadir otra credencial de cliente federado",
"oidc_allowed_group_count": "Recuento de grupos permitidos",
diff --git a/frontend/messages/et.json b/frontend/messages/et.json
index d3aedc7c..b4d819f7 100644
--- a/frontend/messages/et.json
+++ b/frontend/messages/et.json
@@ -1,6 +1,6 @@
{
"$schema": "https://inlang.com/schema/inlang-message-format",
- "my_account": "My Account",
+ "my_account": "Minu konto",
"logout": "Logout",
"confirm": "Confirm",
"docs": "Docs",
@@ -365,6 +365,7 @@
"enter_code_displayed_in_previous_step": "Enter the code that was displayed in the previous step.",
"authorize": "Authorize",
"federated_client_credentials": "Federated Client Credentials",
+ "federated_client_credentials_description": "Föderatiivsed kliendi autentimisandmed võimaldavad OIDC-kliente autentida ilma pikaajalisi salajasi andmeid haldamata. Need kasutavad kolmandate osapoolte poolt väljastatud JWT-tokeneid kliendi kinnituste jaoks, nt töökoormuse identiteeditokeneid.",
"add_federated_client_credential": "Add Federated Client Credential",
"add_another_federated_client_credential": "Add another federated client credential",
"oidc_allowed_group_count": "Allowed Group Count",
diff --git a/frontend/messages/fi.json b/frontend/messages/fi.json
index 59d1f041..0150a08c 100644
--- a/frontend/messages/fi.json
+++ b/frontend/messages/fi.json
@@ -365,6 +365,7 @@
"enter_code_displayed_in_previous_step": "Syötä edellisessä vaiheessa näkynyt koodi.",
"authorize": "Salli",
"federated_client_credentials": "Federoidut asiakastunnukset",
+ "federated_client_credentials_description": "Yhdistetyt asiakastunnistetiedot mahdollistavat OIDC-asiakkaiden todentamisen ilman pitkäaikaisten salaisuuksien hallintaa. Ne hyödyntävät kolmansien osapuolten viranomaisten myöntämiä JWT-tunnuksia asiakastodistuksiin, esimerkiksi työkuorman tunnistetunnuksiin.",
"add_federated_client_credential": "Lisää federoitu asiakastunnus",
"add_another_federated_client_credential": "Lisää toinen federoitu asiakastunnus",
"oidc_allowed_group_count": "Sallittujen ryhmien määrä",
diff --git a/frontend/messages/fr.json b/frontend/messages/fr.json
index 9ff785d4..37e82a1d 100644
--- a/frontend/messages/fr.json
+++ b/frontend/messages/fr.json
@@ -365,6 +365,7 @@
"enter_code_displayed_in_previous_step": "Entrez le code affiché à l'étape précédente.",
"authorize": "Autoriser",
"federated_client_credentials": "Identifiants client fédérés",
+ "federated_client_credentials_description": "Les informations d'identification client fédérées permettent d'authentifier les clients OIDC sans avoir à gérer des secrets à long terme. Elles utilisent des jetons JWT émis par des autorités tierces pour les assertions client, par exemple des jetons d'identité de charge de travail.",
"add_federated_client_credential": "Ajouter un identifiant client fédéré",
"add_another_federated_client_credential": "Ajouter un autre identifiant client fédéré",
"oidc_allowed_group_count": "Nombre de groupes autorisés",
diff --git a/frontend/messages/it.json b/frontend/messages/it.json
index e4337b43..8d35ba97 100644
--- a/frontend/messages/it.json
+++ b/frontend/messages/it.json
@@ -365,6 +365,7 @@
"enter_code_displayed_in_previous_step": "Inserisci il codice visualizzato nel passaggio precedente.",
"authorize": "Autorizza",
"federated_client_credentials": "Identità Federate",
+ "federated_client_credentials_description": "Le credenziali client federate ti permettono di autenticare i client OIDC senza dover gestire segreti a lungo termine. Usano i token JWT rilasciati da autorità terze per le asserzioni dei client, ad esempio i token di identità del carico di lavoro.",
"add_federated_client_credential": "Aggiungi Identità Federata",
"add_another_federated_client_credential": "Aggiungi un'altra identità federata",
"oidc_allowed_group_count": "Numero Gruppi Consentiti",
diff --git a/frontend/messages/ja.json b/frontend/messages/ja.json
index 2f480471..abaa2f43 100644
--- a/frontend/messages/ja.json
+++ b/frontend/messages/ja.json
@@ -365,6 +365,7 @@
"enter_code_displayed_in_previous_step": "前のステップで表示されたコードを入力してください。",
"authorize": "Authorize",
"federated_client_credentials": "連携クライアントの資格情報",
+ "federated_client_credentials_description": "連携クライアントの資格情報は、長期にわたるシークレットを管理せずにOIDCクライアントを認証することを可能にします。これらは、クライアントアサーション(例:ワークロードIDトークン)のためにサードパーティ機関が発行するJWTトークンを活用します。",
"add_federated_client_credential": "Add Federated Client Credential",
"add_another_federated_client_credential": "Add another federated client credential",
"oidc_allowed_group_count": "許可されたグループ数",
diff --git a/frontend/messages/ko.json b/frontend/messages/ko.json
index 6ec07029..aecd899c 100644
--- a/frontend/messages/ko.json
+++ b/frontend/messages/ko.json
@@ -365,6 +365,7 @@
"enter_code_displayed_in_previous_step": "이전 단계에 표시된 코드를 입력하세요.",
"authorize": "승인",
"federated_client_credentials": "연동 클라이언트 자격 증명",
+ "federated_client_credentials_description": "연동 클라이언트 자격 증명은 장기 비밀을 관리하지 않고도 OIDC 클라이언트를 인증할 수 있게 합니다. 이는 클라이언트 어설션(예: 워크로드 신원 토큰)을 위해 제3자 기관이 발급한 JWT 토큰을 활용합니다.",
"add_federated_client_credential": "연동 클라이언트 자격 증명 추가",
"add_another_federated_client_credential": "다른 연동 클라이언트 자격 증명 추가",
"oidc_allowed_group_count": "허용된 그룹 수",
diff --git a/frontend/messages/nl.json b/frontend/messages/nl.json
index 958da44c..9292c6f2 100644
--- a/frontend/messages/nl.json
+++ b/frontend/messages/nl.json
@@ -365,6 +365,7 @@
"enter_code_displayed_in_previous_step": "Voer de code in die in de vorige stap werd getoond.",
"authorize": "Autoriseren",
"federated_client_credentials": "Federatieve clientreferenties",
+ "federated_client_credentials_description": "Met federatieve clientreferenties kun je OIDC-clients verifiëren zonder dat je langdurige geheimen hoeft te beheren. Ze gebruiken JWT-tokens die door externe instanties zijn uitgegeven voor clientverklaringen, zoals workload-identiteitstokens.",
"add_federated_client_credential": "Federatieve clientreferenties toevoegen",
"add_another_federated_client_credential": "Voeg nog een federatieve clientreferentie toe",
"oidc_allowed_group_count": "Aantal groepen met toegang",
diff --git a/frontend/messages/no.json b/frontend/messages/no.json
index 320291e5..5ddff904 100644
--- a/frontend/messages/no.json
+++ b/frontend/messages/no.json
@@ -365,6 +365,7 @@
"enter_code_displayed_in_previous_step": "Enter the code that was displayed in the previous step.",
"authorize": "Authorize",
"federated_client_credentials": "Federated Client Credentials",
+ "federated_client_credentials_description": "Federated client credentials allow authenticating OIDC clients without managing long-lived secrets. They leverage JWT tokens issued by third-party authorities for client assertions, e.g. workload identity tokens.",
"add_federated_client_credential": "Add Federated Client Credential",
"add_another_federated_client_credential": "Add another federated client credential",
"oidc_allowed_group_count": "Allowed Group Count",
diff --git a/frontend/messages/pl.json b/frontend/messages/pl.json
index be63509c..58a614bd 100644
--- a/frontend/messages/pl.json
+++ b/frontend/messages/pl.json
@@ -365,6 +365,7 @@
"enter_code_displayed_in_previous_step": "Wprowadź kod wyświetlony w poprzednim kroku.",
"authorize": "Autoryzuj",
"federated_client_credentials": "Połączone poświadczenia klienta",
+ "federated_client_credentials_description": "Połączone poświadczenia klienta umożliwiają uwierzytelnianie klientów OIDC bez konieczności zarządzania długotrwałymi sekretami. Wykorzystują one tokeny JWT wydane przez zewnętrzne organy do potwierdzania tożsamości klientów, np. tokeny tożsamości obciążenia.",
"add_federated_client_credential": "Dodaj poświadczenia klienta federacyjnego",
"add_another_federated_client_credential": "Dodaj kolejne poświadczenia klienta federacyjnego",
"oidc_allowed_group_count": "Dopuszczalna liczba grup",
diff --git a/frontend/messages/pt-BR.json b/frontend/messages/pt-BR.json
index 3d28baec..92eddd2f 100644
--- a/frontend/messages/pt-BR.json
+++ b/frontend/messages/pt-BR.json
@@ -365,6 +365,7 @@
"enter_code_displayed_in_previous_step": "Digite o código que apareceu na etapa anterior.",
"authorize": "Autorizar",
"federated_client_credentials": "Credenciais de Cliente Federadas",
+ "federated_client_credentials_description": "As credenciais de cliente federadas permitem autenticar clientes OIDC sem precisar gerenciar segredos de longa duração. Elas usam tokens JWT emitidos por autoridades terceiras para asserções de cliente, como tokens de identidade de carga de trabalho.",
"add_federated_client_credential": "Adicionar credencial de cliente federado",
"add_another_federated_client_credential": "Adicionar outra credencial de cliente federado",
"oidc_allowed_group_count": "Total de grupos permitidos",
diff --git a/frontend/messages/ru.json b/frontend/messages/ru.json
index 7774a94e..1fd6ce0f 100644
--- a/frontend/messages/ru.json
+++ b/frontend/messages/ru.json
@@ -365,6 +365,7 @@
"enter_code_displayed_in_previous_step": "Введите код, который был отображен на предыдущем шаге.",
"authorize": "Авторизовать",
"federated_client_credentials": "Федеративные учетные данные клиента",
+ "federated_client_credentials_description": "Федеративные учетные данные клиента позволяют аутентифицировать клиентов OIDC без необходимости управления долгосрочными секретами. Они используют токены JWT, выданные сторонними органами для утверждений клиента, например токены идентификации рабочей нагрузки.",
"add_federated_client_credential": "Добавить федеративные учетные данные клиента",
"add_another_federated_client_credential": "Добавить другие федеративные учетные данные клиента",
"oidc_allowed_group_count": "Число разрешенных групп",
diff --git a/frontend/messages/sv.json b/frontend/messages/sv.json
index 2a46b86f..14d3d481 100644
--- a/frontend/messages/sv.json
+++ b/frontend/messages/sv.json
@@ -365,6 +365,7 @@
"enter_code_displayed_in_previous_step": "Ange koden som visades i föregående steg.",
"authorize": "Godkänn",
"federated_client_credentials": "Federerade klientuppgifter",
+ "federated_client_credentials_description": "Federerade klientautentiseringsuppgifter gör det möjligt att autentisera OIDC-klienter utan att hantera långlivade hemligheter. De utnyttjar JWT-tokens som utfärdats av tredjepartsmyndigheter för klientpåståenden, t.ex. identitetstokens för arbetsbelastning.",
"add_federated_client_credential": "Lägg till federerad klientuppgift",
"add_another_federated_client_credential": "Lägg till ytterligare en federerad klientuppgift",
"oidc_allowed_group_count": "Tillåtet antal grupper",
diff --git a/frontend/messages/tr.json b/frontend/messages/tr.json
index 608939db..03b37e2e 100644
--- a/frontend/messages/tr.json
+++ b/frontend/messages/tr.json
@@ -365,6 +365,7 @@
"enter_code_displayed_in_previous_step": "Önceki adımda görüntülenen kodu girin.",
"authorize": "Yetkilendir",
"federated_client_credentials": "Birleştirilmiş İstemci Kimlik Bilgileri",
+ "federated_client_credentials_description": "Birleştirilmiş istemci kimlik bilgileri, uzun süreli gizli bilgileri yönetmeden OIDC istemcilerinin kimlik doğrulamasını sağlar. Üçüncü taraf yetkililer tarafından istemci beyanları için verilen JWT belirteçlerini (ör. iş yükü kimlik belirteçleri) kullanır.",
"add_federated_client_credential": "Birleştirilmiş İstemci Kimlik Bilgisi Ekle",
"add_another_federated_client_credential": "Başka bir birleştirilmiş istemci kimlik bilgisi ekle",
"oidc_allowed_group_count": "İzin Verilen Grup Sayısı",
diff --git a/frontend/messages/uk.json b/frontend/messages/uk.json
index 06b548fe..8efc5223 100644
--- a/frontend/messages/uk.json
+++ b/frontend/messages/uk.json
@@ -365,6 +365,7 @@
"enter_code_displayed_in_previous_step": "Введіть код, який було показано на попередньому кроці.",
"authorize": "Авторизувати",
"federated_client_credentials": "Федеративні облікові дані клієнта",
+ "federated_client_credentials_description": "Федеративні облікові дані клієнта дозволяють автентифікувати клієнтів OIDC без управління довготривалими секретами. Вони використовують токени JWT, видані сторонніми органами для підтвердження клієнтів, наприклад, токени ідентичності робочого навантаження.",
"add_federated_client_credential": "Додати федеративний обліковий запис клієнта",
"add_another_federated_client_credential": "Додати ще один федеративний обліковий запис клієнта",
"oidc_allowed_group_count": "Кількість дозволених груп",
diff --git a/frontend/messages/vi.json b/frontend/messages/vi.json
index 8d861e5a..c8522860 100644
--- a/frontend/messages/vi.json
+++ b/frontend/messages/vi.json
@@ -365,6 +365,7 @@
"enter_code_displayed_in_previous_step": "Nhập mã đã hiển thị ở bước trước.",
"authorize": "Cho phép",
"federated_client_credentials": "Thông Tin Xác Thực Của Federated Clients",
+ "federated_client_credentials_description": "Thông tin xác thực của federated clients cho phép xác thực các client OIDC mà không cần quản lý các khóa bí mật có thời hạn dài. Chúng sử dụng các token JWT do các cơ quan bên thứ ba cấp để xác nhận client, ví dụ như các token danh tính khối lượng công việc.",
"add_federated_client_credential": "Thêm thông tin xác thực cho federated clients",
"add_another_federated_client_credential": "Thêm một thông tin xác thực cho federated clients khác",
"oidc_allowed_group_count": "Số lượng nhóm được phép",
diff --git a/frontend/messages/zh-CN.json b/frontend/messages/zh-CN.json
index 99b9a987..87a5a99c 100644
--- a/frontend/messages/zh-CN.json
+++ b/frontend/messages/zh-CN.json
@@ -365,6 +365,7 @@
"enter_code_displayed_in_previous_step": "输入在上一步中显示的代码",
"authorize": "授权",
"federated_client_credentials": "联合身份",
+ "federated_client_credentials_description": "联合客户端凭证允许在无需管理长期密钥的情况下验证OIDC客户端。该机制利用第三方机构签发的JWT令牌来验证客户端声明,例如工作负载身份令牌。",
"add_federated_client_credential": "添加联合身份",
"add_another_federated_client_credential": "再添加一个联合身份",
"oidc_allowed_group_count": "允许的群组数量",
diff --git a/frontend/messages/zh-TW.json b/frontend/messages/zh-TW.json
index eac55e18..a4c62bc7 100644
--- a/frontend/messages/zh-TW.json
+++ b/frontend/messages/zh-TW.json
@@ -365,6 +365,7 @@
"enter_code_displayed_in_previous_step": "請輸入上一步顯示的代碼。",
"authorize": "授權",
"federated_client_credentials": "聯邦身分",
+ "federated_client_credentials_description": "聯邦身分憑證允許驗證 OIDC 用戶端,無需管理長期存續的機密。此機制利用第三方授權機構發行的 JWT 權杖來進行用戶端聲明,例如工作負載身分權杖。",
"add_federated_client_credential": "增加聯邦身分",
"add_another_federated_client_credential": "新增另一組聯邦身分",
"oidc_allowed_group_count": "允許的群組數量",
diff --git a/frontend/package.json b/frontend/package.json
index 4699106c..ac35091c 100644
--- a/frontend/package.json
+++ b/frontend/package.json
@@ -1,64 +1,64 @@
{
- "name": "pocket-id-frontend",
- "version": "2.3.0",
- "private": true,
- "type": "module",
- "scripts": {
- "preinstall": "npx only-allow pnpm",
- "dev": "vite dev --port 3000",
- "build": "vite build",
- "preview": "vite preview --port 3000",
- "check": "svelte-kit sync && svelte-check --tsconfig ./tsconfig.json",
- "check:watch": "svelte-kit sync && svelte-check --tsconfig ./tsconfig.json --watch",
- "lint": "prettier --check . && eslint .",
- "format": "prettier --write ."
- },
- "dependencies": {
- "@simplewebauthn/browser": "^13.2.2",
- "@tailwindcss/vite": "^4.2.0",
- "axios": "^1.13.5",
- "clsx": "^2.1.1",
- "date-fns": "^4.1.0",
- "jose": "^6.1.3",
- "qrcode": "^1.5.4",
- "runed": "^0.37.1",
- "sveltekit-superforms": "^2.30.0",
- "tailwind-merge": "^3.5.0",
- "zod": "^4.3.6"
- },
- "devDependencies": {
- "@inlang/paraglide-js": "^2.12.0",
- "@inlang/plugin-m-function-matcher": "^2.2.1",
- "@inlang/plugin-message-format": "^4.3.0",
- "@internationalized/date": "^3.11.0",
- "@lucide/svelte": "^0.559.0",
- "@sveltejs/adapter-static": "^3.0.10",
- "@sveltejs/kit": "^2.53.4",
- "@sveltejs/vite-plugin-svelte": "^6.2.4",
- "@types/eslint": "^9.6.1",
- "@types/node": "^24.10.13",
- "@types/qrcode": "^1.5.6",
- "bits-ui": "^2.16.2",
- "eslint": "^9.39.3",
- "eslint-config-prettier": "^10.1.8",
- "eslint-plugin-svelte": "^3.15.0",
- "formsnap": "^2.0.1",
- "globals": "^16.5.0",
- "mode-watcher": "^1.1.0",
- "prettier": "^3.8.1",
- "prettier-plugin-svelte": "^3.5.0",
- "prettier-plugin-tailwindcss": "^0.7.2",
- "rollup": "^4.59.0",
- "svelte": "^5.53.6",
- "svelte-check": "^4.4.3",
- "svelte-sonner": "^1.0.7",
- "tailwind-variants": "^3.2.2",
- "tailwindcss": "^4.2.0",
- "tslib": "^2.8.1",
- "tw-animate-css": "^1.4.0",
- "typescript": "^5.9.3",
- "typescript-eslint": "^8.56.0",
- "vite": "^7.3.1",
- "vite-plugin-compression": "^0.5.1"
- }
+ "name": "pocket-id-frontend",
+ "version": "2.4.0",
+ "private": true,
+ "type": "module",
+ "scripts": {
+ "preinstall": "npx only-allow pnpm",
+ "dev": "vite dev --port 3000",
+ "build": "vite build",
+ "preview": "vite preview --port 3000",
+ "check": "svelte-kit sync && svelte-check --tsconfig ./tsconfig.json",
+ "check:watch": "svelte-kit sync && svelte-check --tsconfig ./tsconfig.json --watch",
+ "lint": "prettier --check . && eslint .",
+ "format": "prettier --write ."
+ },
+ "dependencies": {
+ "@simplewebauthn/browser": "^13.2.2",
+ "@tailwindcss/vite": "^4.2.0",
+ "axios": "^1.13.5",
+ "clsx": "^2.1.1",
+ "date-fns": "^4.1.0",
+ "jose": "^6.1.3",
+ "qrcode": "^1.5.4",
+ "runed": "^0.37.1",
+ "sveltekit-superforms": "^2.30.0",
+ "tailwind-merge": "^3.5.0",
+ "zod": "^4.3.6"
+ },
+ "devDependencies": {
+ "@inlang/paraglide-js": "^2.12.0",
+ "@inlang/plugin-m-function-matcher": "^2.2.1",
+ "@inlang/plugin-message-format": "^4.3.0",
+ "@internationalized/date": "^3.11.0",
+ "@lucide/svelte": "^0.559.0",
+ "@sveltejs/adapter-static": "^3.0.10",
+ "@sveltejs/kit": "^2.53.4",
+ "@sveltejs/vite-plugin-svelte": "^6.2.4",
+ "@types/eslint": "^9.6.1",
+ "@types/node": "^24.10.13",
+ "@types/qrcode": "^1.5.6",
+ "bits-ui": "^2.16.2",
+ "eslint": "^9.39.3",
+ "eslint-config-prettier": "^10.1.8",
+ "eslint-plugin-svelte": "^3.15.0",
+ "formsnap": "^2.0.1",
+ "globals": "^16.5.0",
+ "mode-watcher": "^1.1.0",
+ "prettier": "^3.8.1",
+ "prettier-plugin-svelte": "^3.5.0",
+ "prettier-plugin-tailwindcss": "^0.7.2",
+ "rollup": "^4.59.0",
+ "svelte": "^5.53.6",
+ "svelte-check": "^4.4.3",
+ "svelte-sonner": "^1.0.7",
+ "tailwind-variants": "^3.2.2",
+ "tailwindcss": "^4.2.0",
+ "tslib": "^2.8.1",
+ "tw-animate-css": "^1.4.0",
+ "typescript": "^5.9.3",
+ "typescript-eslint": "^8.56.0",
+ "vite": "^7.3.1",
+ "vite-plugin-compression": "^0.5.1"
+ }
}
diff --git a/frontend/src/lib/services/user-service.ts b/frontend/src/lib/services/user-service.ts
index e577d513..cbf68241 100644
--- a/frontend/src/lib/services/user-service.ts
+++ b/frontend/src/lib/services/user-service.ts
@@ -72,7 +72,7 @@ export default class UserService extends APIService {
};
createOneTimeAccessToken = async (userId: string = 'me', ttl?: string | number) => {
- const res = await this.api.post(`/users/${userId}/one-time-access-token`, { userId, ttl });
+ const res = await this.api.post(`/users/${userId}/one-time-access-token`, { ttl });
return res.data.token;
};
diff --git a/frontend/src/routes/settings/admin/oidc-clients/oidc-callback-url-input.svelte b/frontend/src/routes/settings/admin/oidc-clients/oidc-callback-url-input.svelte
index cc261fa9..8587f943 100644
--- a/frontend/src/routes/settings/admin/oidc-clients/oidc-callback-url-input.svelte
+++ b/frontend/src/routes/settings/admin/oidc-clients/oidc-callback-url-input.svelte
@@ -32,6 +32,8 @@
aria-invalid={!!error}
data-testid={`callback-url-${i + 1}`}
type="text"
+ inputmode="url"
+ autocomplete="url"
bind:value={callbackURLs[i]}
/>