Просмотр исходного кода

chore: big refactoring job

master
Blaz Smehov 4 дня назад
Родитель
Commit
3daee1984c
95 измененных файлов: 3411 добавлений и 4266 удалений
  1. +6
    -0
      .woodpecker.yml
  2. +138
    -0
      CODE_GRADE_AND_REFACTOR.md
  3. +0
    -288
      CODE_REVIEW_REPORT.md
  4. +14
    -5
      build/docker-compose.dev.yml
  5. +27
    -0
      build/docker-compose.woodpecker.yml
  6. +11
    -2
      build/docker-compose.yaml
  7. +7
    -188
      cmd/bridge/main.go
  8. +7
    -118
      cmd/decoder/main.go
  9. +8
    -187
      cmd/location/main.go
  10. +8
    -179
      cmd/server/main.go
  11. +0
    -748
      docs/API.md
  12. +0
    -1039
      docs/DEPLOYMENT.md
  13. Двоичные данные
      docs/Frame definition- B7,MWB01,MWC01.pdf
  14. +0
    -9
      docs/README.md
  15. +319
    -0
      docs/REFACTORING_OVERVIEW.md
  16. +2
    -0
      go.mod
  17. +4
    -0
      go.sum
  18. +0
    -21
      internal/README.md
  19. +0
    -0
      internal/app/_your_app_/.keep
  20. +122
    -0
      internal/app/bridge/app.go
  21. +91
    -0
      internal/app/decoder/app.go
  22. +100
    -0
      internal/app/location/app.go
  23. +145
    -0
      internal/app/server/app.go
  24. +51
    -0
      internal/app/server/events.go
  25. +59
    -0
      internal/app/server/routes.go
  26. +29
    -0
      internal/pkg/api/handler/health.go
  27. +26
    -0
      internal/pkg/api/middleware/cors.go
  28. +41
    -0
      internal/pkg/api/middleware/logging.go
  29. +22
    -0
      internal/pkg/api/middleware/recovery.go
  30. +22
    -0
      internal/pkg/api/middleware/requestid.go
  31. +55
    -0
      internal/pkg/api/response/response.go
  32. +10
    -6
      internal/pkg/apiclient/auth.go
  33. +26
    -29
      internal/pkg/apiclient/data.go
  34. +19
    -12
      internal/pkg/apiclient/updatedb.go
  35. +28
    -0
      internal/pkg/apiclient/utils.go
  36. +76
    -0
      internal/pkg/bridge/handler.go
  37. +61
    -0
      internal/pkg/bridge/mqtt.go
  38. +23
    -28
      internal/pkg/common/appcontext/context.go
  39. +92
    -12
      internal/pkg/config/config.go
  40. +7
    -3
      internal/pkg/controller/parser_controller.go
  41. +15
    -5
      internal/pkg/controller/settings_controller.go
  42. +12
    -6
      internal/pkg/controller/trackers_controller.go
  43. +3
    -3
      internal/pkg/database/database.go
  44. +71
    -0
      internal/pkg/decoder/process.go
  45. +7
    -3
      internal/pkg/kafkaclient/consumer.go
  46. +15
    -9
      internal/pkg/kafkaclient/manager.go
  47. +51
    -0
      internal/pkg/location/assign.go
  48. +96
    -0
      internal/pkg/location/filter.go
  49. +41
    -0
      internal/pkg/location/inference.go
  50. +7
    -5
      internal/pkg/logger/logger.go
  51. +3
    -0
      internal/pkg/model/parser.go
  52. +13
    -0
      internal/pkg/model/position.go
  53. +2
    -2
      internal/pkg/model/trackers.go
  54. +5
    -5
      internal/pkg/model/types.go
  55. +24
    -7
      internal/pkg/service/beacon_service.go
  56. +4
    -3
      internal/pkg/service/parser_service.go
  57. +0
    -40
      internal/structure.md
  58. +46
    -7
      scripts/README.md
  59. +2
    -0
      scripts/_common.sh
  60. +0
    -16
      scripts/adddecoder.sh
  61. +0
    -246
      scripts/api.sh
  62. +129
    -0
      scripts/api/smoke_test.sh
  63. +33
    -0
      scripts/api/tracks.sh
  64. +21
    -0
      scripts/auth/token.sh
  65. +19
    -0
      scripts/config/add_parser.sh
  66. +18
    -0
      scripts/config/settings.sh
  67. +0
    -46
      scripts/gatewayApi.sh
  68. +47
    -0
      scripts/seed/seed_trackers.sh
  69. +0
    -13
      scripts/settingsApi.sh
  70. +0
    -19
      scripts/testAPI.sh
  71. +0
    -248
      scripts/testalltrackers.sh
  72. +0
    -15
      scripts/token.sh
  73. +0
    -58
      scripts/trackerApi.sh
  74. +0
    -46
      scripts/trackerzonesApi.sh
  75. +0
    -61
      scripts/tracks.sh
  76. +0
    -44
      scripts/zonesApi.sh
  77. +50
    -145
      tests/TEST_SUMMARY.md
  78. +1
    -0
      tests/Untitled
  79. +148
    -0
      tests/appcontext/appcontext_test.go
  80. +4
    -4
      tests/bridge/integration_test.go
  81. +0
    -11
      tests/bridge/mqtt_handler_test.go
  82. +12
    -0
      tests/bridge/testutil.go
  83. +59
    -0
      tests/config/config_test.go
  84. +141
    -0
      tests/controller/controller_test.go
  85. +64
    -39
      tests/decoder/decode_test.go
  86. +32
    -38
      tests/decoder/event_loop_test.go
  87. +51
    -54
      tests/decoder/integration_test.go
  88. +21
    -194
      tests/decoder/parser_registry_test.go
  89. +17
    -0
      tests/e2e/e2e_test.go
  90. +87
    -0
      tests/kafkaclient/manager_test.go
  91. +41
    -0
      tests/location/location_test.go
  92. +28
    -0
      tests/logger/logger_test.go
  93. +106
    -0
      tests/model/model_test.go
  94. +99
    -0
      tests/service/service_test.go
  95. +110
    -0
      tests/utils/utils_test.go

+ 6
- 0
.woodpecker.yml Просмотреть файл

@@ -0,0 +1,6 @@
pipeline:
  test:
    image: golang:1.24.0
    commands:
      - go mod download
      - go test ./...

+ 138
- 0
CODE_GRADE_AND_REFACTOR.md Просмотреть файл

@@ -0,0 +1,138 @@
# Code Grade & Production Readiness Report (Updated)

## Overall grade: **7.0 / 10**

The codebase has been refactored into a clear app/service layout with thin `cmd` entrypoints, shared `internal/pkg` libraries, health/readiness endpoints, structured middleware, and addressed reliability/security items. It is suitable for development and staging; production use still requires CORS restriction, optional metrics/tracing, and (if desired) request validation and OpenAPI.

---

## 1. What’s working well

| Area | Notes |
|------|--------|
| **Structure** | `cmd/<service>/main.go` is thin (~25 lines); `internal/app/*` holds per-service composition; `internal/pkg` has api (response, middleware, handler), location, bridge, decoder, config, kafkaclient, logger, model, controller, service, database, apiclient, appcontext. |
| **Concurrency** | Channels, `sync.WaitGroup`, and `AppState` with RWMutex; event loops live in app layer, not in main. |
| **Shutdown** | `signal.NotifyContext` + app `Run`/`Shutdown`; Kafka and MQTT cleanup in app. |
| **Kafka** | `KafkaManager`, generic `Consume[T]`, graceful close. |
| **Observability** | `/health` and `/ready` (DB ping); middleware: logging, recovery, request ID, CORS; logging to file with fallback to stderr if file open fails. |
| **Reliability** | No panics in library code for logger (fallback to stderr); MQTT connect returns error; server init returns error; `WriteMessages` errors checked in parser service and settings controller. |
| **Security** | TLS skip verify is configurable via `TLS_INSECURE_SKIP_VERIFY` (default false). |
| **Testing** | Unit tests for appcontext, utils, model, controller, service, config; integration tests for bridge/decoder. |
| **Dependencies** | Modern stack (slog, segmentio/kafka-go, gorilla/mux, gorm). |

---

## 2. Fixes applied since last report

### 2.1 Startup and library behavior

- **Bridge:** MQTT connect failure no longer panics; `internal/pkg/bridge/mqtt.go` returns error from `NewMQTTClient`, `cmd/bridge/main.go` exits with `log.Fatalf` on error.
- **Server:** DB and config init live in `internal/app/server`; `New`/`Init` return errors; `cmd/server/main.go` uses `log.Fatalf` on error (no panic in library).
- **Logger:** `CreateLogger` no longer uses `log.Fatalf`; on log file open failure it returns a logger that writes only to stderr and a no-op cleanup.

### 2.2 Ignored errors

- **parser_service.go:** `writer.WriteMessages(ctx, msg)` return value is checked and propagated.
- **settings_controller.go:** `writer.WriteMessages` error is checked; on failure returns 500 and logs; response sets `Content-Type: application/json`.
- **database:** Unused global `var DB *gorm.DB` removed.

### 2.3 Security and configuration

- **TLS:** `config.Config` has `TLSInsecureSkipVerify bool` (env `TLS_INSECURE_SKIP_VERIFY`, default false). Used in `apiclient.UpdateDB` and in location inference (`NewDefaultInferencer(cfg.TLSInsecureSkipVerify)`).
- **CORS:** Not changed (origin policy left to operator; middleware supports configurable origins).

### 2.4 Observability

- **Health/readiness:** Server exposes `/health` (liveness) and `/ready` (DB ping) via `internal/pkg/api/handler/health.go`.
- **Middleware:** Recovery (panic → 500), logging (method, path, status, duration), request ID (`X-Request-ID`), CORS.

### 2.5 Code quality

- **Bridge:** MQTT topic parsing uses `strings.SplitN(topic, "/", 2)` to avoid panic; CSV branch validates and logs (no writer usage yet).
- **Location:** Magic numbers moved to named constants in `internal/pkg/location/filter.go` (e.g. `SeenWeight`, `RSSIWeight`, `DefaultDistance`).
- **Duplication:** Bootstrap removed; each service uses `internal/app/<service>` for init, run, and shutdown.

---

## 3. Remaining / known limitations

### 3.1 Config and env

- **`getEnvPanic`** in `config` still panics on missing required env. To avoid panics in library, consider a `LoadServerSafe` (or similar) that returns `(*Config, error)` and use it only from `main` with explicit exit. Not changed in this pass.

### 3.2 Security

- **CORS:** Defaults remain permissive (e.g. `*`). Restrict to known frontend origins when deploying (e.g. via env or config).
- **Secrets:** Still loaded from env only; ensure no secrets in logs; consider a secret manager for production.

### 3.3 API and validation

- No OpenAPI/Swagger; no formal request/response contracts.
- Many handlers still use `http.Error` or `w.Write` without a single response helper; `api/response` exists for new/consistent endpoints.
- No request body validation (e.g. go-playground/validator); no idempotency keys.

### 3.4 Resilience and operations

- Kafka consumer: on `ReadMessage`/unmarshal error, logs and continues; no dead-letter or backoff yet.
- DB: no documented pool tuning; readiness only checks DB ping.
- No metrics (Prometheus/OpenTelemetry). No distributed tracing.

---

## 4. Grade breakdown (updated)

| Criterion | Score | Comment |
|---------------------|-------|--------|
| Architecture | 8/10 | Clear app layer, thin main, pkg separation; handlers still take concrete DB/writer (can be abstracted later). |
| Reliability | 7/10 | No panics in logger/bridge init; WriteMessages errors handled; health/ready; logger fallback. |
| Security | 6/10 | TLS skip verify configurable (default off); CORS still broad; secrets in env. |
| Observability | 7/10 | Health/ready, request logging, request ID, recovery; no metrics/tracing. |
| API design | 6/10 | Response helpers and middleware in place; many handlers still ad-hoc; no spec/validation. |
| Testing | 6/10 | Good unit coverage; more integration/E2E would help. |
| Code quality | 8/10 | Clear structure, constants for magic numbers, dead code removed, duplication reduced. |
| Production readiness | 6/10 | Health/ready and error handling in place; CORS, metrics, and validation still to do. |

**Average ≈ 6.75; grade 7.0/10** – Refactor and applied fixes significantly improve structure, reliability, and observability; remaining work is mostly CORS, validation, and metrics/tracing.

---

## 5. Checklist (updated)

### 5.1 Reliability

- [x] Remove panics / `log.Fatalf` from library where possible (logger fallback; bridge returns error).
- [x] Check and handle `WriteMessages` in parser service and settings controller.
- [x] Add `/health` and `/ready` on server.
- [ ] Document or add Kafka consumer retry/backoff and dead-letter if needed.
- [x] Make TLS skip verify configurable; default false.

### 5.2 Observability

- [x] Structured logging and request ID middleware.
- [ ] Add metrics (e.g. Prometheus) and optional tracing.

### 5.3 API and validation

- [ ] OpenAPI spec and validation.
- [ ] Consistent use of `api/response` and JSON error body across handlers.
- [ ] Restrict CORS to specific origins (operator-defined).

### 5.4 Operations

- [ ] Document env vars and deployment topology.
- [ ] Configurable timeouts; rate limiting if required.

### 5.5 Code and structure

- [x] Bridge topic parsing and CSV branch behavior clarified.
- [x] Unused `database.DB` global removed.
- [x] Location magic numbers moved to constants.
- [x] App layer and api/middleware/response in place.

---

## 6. Summary

- **Grade: 7.0/10** – Refactor and targeted fixes improve structure, reliability, and observability. Server has health/ready, middleware, and no panics in logger/bridge init; TLS skip verify is configurable; WriteMessages and logger errors are handled.
- **Still to do for production:** Restrict CORS, add metrics (and optionally tracing), validate requests and adopt consistent API responses, and document operations. Config loading can be made panic-free by adding safe loaders that return errors.
- **Not changed by design:** CORS policy left for operator to configure (e.g. via env or config).

+ 0
- 288
CODE_REVIEW_REPORT.md Просмотреть файл

@@ -1,288 +0,0 @@
# Code Review Report: AFASystems Presence Services

**Review Date:** February 11, 2025
**Scope:** Four services (bridge, server, location, decoder) and their internal packages
**Reviewer:** Automated Code Review

---

## Executive Summary

This report reviews the custom packages used across the four main services of the presence detection system. The codebase demonstrates a coherent architecture with clear separation of concerns, but several reliability issues, unused code paths, and refactoring opportunities were identified.

**Overall Rating: 6.5/10**

---

## 1. Package Inventory by Service

### Bridge (`cmd/bridge/main.go`)

| Package | Purpose |
| -------------------------------- | -------------------------------------------------- |
| `internal/pkg/common/appcontext` | Shared application state (beacon lookup, settings) |
| `internal/pkg/config` | Environment-based configuration |
| `internal/pkg/kafkaclient` | Kafka consumer/producer management |
| `internal/pkg/logger` | Structured logging setup |
| `internal/pkg/model` | Data structures |

### Server (`cmd/server/main.go`)

| Package | Purpose |
| -------------------------------- | --------------------------------------------- |
| `internal/pkg/apiclient` | External API authentication and data fetching |
| `internal/pkg/common/appcontext` | Shared application state |
| `internal/pkg/config` | Configuration |
| `internal/pkg/controller` | HTTP handlers for REST API |
| `internal/pkg/database` | PostgreSQL connection (GORM) |
| `internal/pkg/kafkaclient` | Kafka management |
| `internal/pkg/logger` | Logging |
| `internal/pkg/model` | Data structures |
| `internal/pkg/service` | Business logic (location, parser) |

### Location (`cmd/location/main.go`)

| Package | Purpose |
| -------------------------------- | ---------------------- |
| `internal/pkg/common/appcontext` | Beacon state, settings |
| `internal/pkg/common/utils` | Distance calculation |
| `internal/pkg/config` | Configuration |
| `internal/pkg/kafkaclient` | Kafka management |
| `internal/pkg/logger` | Logging |
| `internal/pkg/model` | Data structures |

### Decoder (`cmd/decoder/main.go`)

| Package | Purpose |
| -------------------------------- | ---------------------------------- |
| `internal/pkg/common/appcontext` | Beacon events state |
| `internal/pkg/common/utils` | AD structure parsing, flag removal |
| `internal/pkg/config` | Configuration |
| `internal/pkg/kafkaclient` | Kafka management |
| `internal/pkg/logger` | Logging |
| `internal/pkg/model` | Data structures, parser registry |

---

## 2. Critical Issues (Must Fix)

### 2.1 Tracker Delete Method Case Mismatch (Bug)

Resolved

### 2.2 Potential Panic in Location Algorithm

**Location:** `cmd/location/main.go:99-101`

Resolved

### 2.3 Hardcoded Config Path

**Location:** `cmd/server/main.go:60`

```go
configFile, err := os.Open("/app/cmd/server/config.json")
```

This path is Docker-specific and fails in local development or other deployment environments.

**Fix:** Use configurable path (e.g., `CONFIG_PATH` env var) or relative path based on executable location.

---

## 3. Security Concerns

### 3.1 Hardcoded Credentials

**Locations:**

- `internal/pkg/config/config.go`: Default values include `ClientSecret`, `HTTPPassword` with production-like strings
- `internal/pkg/apiclient/auth.go`: GetToken() hardcodes credentials in formData (lines 21-24) instead of using `cfg.HTTPClientID`, `cfg.ClientSecret`, etc.
- Config struct has `HTTPClientID`, `ClientSecret`, `HTTPUsername`, etc., but `auth.go` ignores them

**Recommendation:** Wire config values into auth; never commit production credentials.

### 3.2 TLS Verification Disabled

**Location:** `internal/pkg/apiclient/updatedb.go:21-22`

```go
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
```

**Recommendation:** Use proper certificates or make this configurable for dev only.

---

## 4. Unused Code & Directories

### 4.1 Orphaned Packages (Not Imported)

Resolved

### 4.2 Unused Functions / Methods

Resolved

### 4.3 Dead Code in bridge/mqtthandler

**Location:** `internal/pkg/bridge/mqtthandler/mqtthandler.go:75-83`

Resolved

### 4.4 Unnecessary Compile-Time Assertion

**Location:** `cmd/server/main.go:31`

```go
var _ io.Writer = (*os.File)(nil)
```

Redundant; `*os.File` implements `io.Writer`. Safe to remove.

### 4.5 Unused go.mod Dependencies

| Package | Notes |
| ------------------------------ | ------------------------------- |
| `github.com/boltdb/bolt` | Not imported in any source file |
| `github.com/yosssi/gmq` | Not imported |
| `github.com/gorilla/websocket` | Not imported |

**Recommendation:** Run `go mod tidy` after removing dead imports, or explicitly remove if kept for future use.

---

## 5. Reliability & Error Handling

### 5.1 Kafka Consumer Error Handling

**Location:** `internal/pkg/kafkaclient/consumer.go:20-23`

On `ReadMessage` or `Unmarshal` error, the consumer logs and continues. For `context.Canceled` or partition errors, this may cause tight loops. Consider backoff or bounded retries.

### 5.2 KafkaManager GetReader/GetWriter Lock Usage

**Location:** `internal/pkg/kafkaclient/manager.go:101-111`

`GetReader` and `GetWriter` hold the lock for the entire call including return. If the returned pointer is used after the lock is released, that's fine, but the pattern holds the lock longer than necessary. Prefer:

```go
func (m *KafkaManager) GetReader(topic string) *kafka.Reader {
m.kafkaReadersMap.KafkaReadersLock.RLock()
defer m.kafkaReadersMap.KafkaReadersLock.RUnlock()
return m.kafkaReadersMap.KafkaReaders[topic]
}
```

Use `RLock` for read-only access.

### 5.3 Logger File Handle Leak

**Location:** `internal/pkg/logger/logger.go`

The opened file `f` is never closed. For long-running processes this is usually acceptable (log files stay open), but worth documenting. If multiple loggers are created, each holds a file descriptor.

### 5.4 Silent JSON Unmarshal

**Location:** `cmd/server/main.go:68`

```go
json.Unmarshal(b, &configs)
```

Error is ignored. Invalid JSON would leave `configs` empty without feedback.

---

## 6. Code Quality & Maintainability

### 6.1 Inconsistent Logging

- Mix of `log.Printf`, `fmt.Println`, `fmt.Printf`, and `slog.Info/Error`
- Italian message: "Messaggio CSV non valido" in bridge
- Typo: "Beggining" → "Beginning" (bridge, location, decoder, server)

### 6.2 Magic Numbers

- Channel sizes: 200, 500, 2000 without named constants
- RSSI weights in location: `seenW := 1.5`, `rssiW := 0.75`
- Ticker intervals: 1s, 2s without configuration

### 6.3 Duplication

- Bridge defines `mqtthandler` inline while `internal/pkg/bridge/mqtthandler` exists with similar logic
- Both use `appcontext.BeaconExists` for lookup; bridge version also sets `adv.ID` from lookup

### 6.4 Parser ID Inconsistency

**Decoder** expects `msg.ID` values: `"add"`, `"delete"`, `"update"`.
**ParserDeleteController** sends `ID: "delete"` ✓
**ParserAddController** sends `ID: "add"` ✓
**ParserUpdateController** sends `ID: "update"` ✓

Decoder’s update case re-registers; add and update are effectively the same.

---

## 7. AppContext Thread Safety

`AppState` mixes safe and unsafe access:

- `beaconsLookup` (map) has no mutex; `AddBeaconToLookup`, `RemoveBeaconFromLookup`, `CleanLookup`, `BeaconExists` are not thread-safe
- Bridge goroutines (Kafka consumers + event loop) and MQTT handler may access it concurrently

**Recommendation:** Protect `beaconsLookup` with `sync.RWMutex` or use `sync.Map`.

---

## 8. Refactoring Suggestions

1. **Unify config loading:** Support JSON config file path via env; keep env overrides for sensitivity.
2. **Extract constants:** Kafka topic names, channel sizes, ticker intervals.
3. **Consolidate MQTT handling:** Use `internal/pkg/bridge/mqtthandler` and fix it, or remove the package and keep logic in bridge.
4. **API client:** Use config for URLs and credentials; add timeouts to HTTP client.
5. **Controllers:** Add request validation, consistent error responses, and structured error types.
6. **Service layer:** `formatMac` in `beacon_service.go` could move to `internal/pkg/common/utils` for reuse and testing.

---

## 9. Directory Structure Notes

| Directory | Status |
| ------------------------------------------ | ----------------------------------------------------- |
| `internal/app/_your_app_/` | Placeholder with `.keep`; safe to remove or repurpose |
| `internal/pkg/model/.keep` | Placeholder; low impact |
| `web/app/`, `web/static/`, `web/template/` | Empty except `.keep`; clarify if planned |
| `build/package/` | Contains Dockerfiles; structure is reasonable |

---

## 10. Summary of Recommendations

| Priority | Action |
| -------- | ------------------------------------------------------------------ |
| **P0** | Fix TrackerDelete `"Delete"` → `"DELETE"` |
| **P0** | Guard empty `BeaconMetrics` in location |
| **P1** | Make config.json path configurable |
| **P1** | Fix `beaconsLookup` concurrency in AppState |
| **P2** | Remove or integrate `internal/pkg/redis` and `internal/pkg/bridge` |
| **P2** | Remove unused functions (ValidateRSSI, EventToBeaconService, etc.) |
| **P2** | Replace hardcoded credentials in apiclient with config |
| **P3** | Unify logging (slog), fix typos, extract constants |
| **P3** | Run `go mod tidy` and drop unused dependencies |

---

## 11. Rating Breakdown

| Category | Score | Notes |
| ---------------- | ----- | ----------------------------------------------------------------------- |
| Architecture | 7/10 | Clear service boundaries; some shared-state issues |
| Reliability | 5/10 | Critical bugs (case mismatch, panic risk); error handling could improve |
| Security | 4/10 | Hardcoded credentials; disabled TLS verification |
| Maintainability | 6/10 | Duplication, magic numbers, inconsistent logging |
| Code Cleanliness | 5/10 | Unused code, dead packages, redundant assertions |

**Overall: 6.5/10**

The system has a solid foundation and sensible separation of concerns. Addressing the critical bugs, security issues, and removing dead code would materially improve reliability and maintainability.

+ 14
- 5
build/docker-compose.dev.yml Просмотреть файл

@@ -5,7 +5,7 @@ services:
container_name: db
restart: always
ports:
- "127.0.0.1:5432:5432"
- "127.0.0.1:5432:5432"
environment:
- POSTGRES_USER=postgres
- POSTGRES_PASSWORD=postgres
@@ -85,7 +85,7 @@ services:
condition: service_healthy
restart: always
volumes:
- ../:/app
- ../:/app
command: air --build.cmd "go build -buildvcs=false -o /tmp/decoder ./cmd/decoder" --build.bin "/tmp/decoder"
presense-server:
@@ -101,6 +101,14 @@ services:
- DBUser=postgres
- DBPass=postgres
- DBName=postgres
- HTTPClientID=Fastapi
- ClientSecret=wojuoB7Z5xhlPFrF2lIxJSSdVHCApEgC
- HTTPUsername=core
- HTTPPassword=C0r3_us3r_Cr3d3nt14ls
- HTTPAudience=Fastapi
- HTTPADDR=0.0.0.0:1902
- CONFIG_PATH=/app/cmd/server/config.json
- API_BASE_URL=https://10.251.0.30:5050
ports:
- "127.0.0.1:1902:1902"
depends_on:
@@ -112,7 +120,7 @@ services:
condition: service_healthy
restart: always
volumes:
- ../:/app
- ../:/app
command: air --build.cmd "go build -buildvcs=false -o /tmp/server ./cmd/server" --build.bin "/tmp/server"

presense-bridge:
@@ -126,6 +134,7 @@ services:
- MQTT_HOST=192.168.1.101
- MQTT_USERNAME=user
- MQTT_PASSWORD=pass
- MQTT_CLIENT_ID=bridge
depends_on:
kafka-init:
condition: service_completed_successfully
@@ -133,7 +142,7 @@ services:
condition: service_healthy
restart: always
volumes:
- ../:/app
- ../:/app
command: air --build.cmd "go build -buildvcs=false -o /tmp/bridge ./cmd/bridge" --build.bin "/tmp/bridge"

presense-location:
@@ -151,7 +160,7 @@ services:
condition: service_healthy
restart: always
volumes:
- ../:/app
- ../:/app
command: air --build.cmd "go build -buildvcs=false -o /tmp/location ./cmd/location" --build.bin "/tmp/location"



+ 27
- 0
build/docker-compose.woodpecker.yml Просмотреть файл

@@ -0,0 +1,27 @@
services:
  woodpecker-server:
    image: woodpeckerci/woodpecker-server:v1.0.2
    ports:
      - 8000:8000
    volumes:
      - woodpecker-data:/var/lib/woodpecker
    environment:
      - WOODPECKER_GITEA=true
      - WOODPECKER_OPEN=true
      - WOODPECKER_ADMIN=Smehov
      - WOODPECKER_GITEA_URL=https://git.afasystems.it/
      - WOODPECKER_GITEA_CLIENT=005e1420-b635-4d82-ac4b-70bfd61746ed
      - WOODPECKER_GITEA_SECRET=rsfZ5jD4UcmrSl9mqutnHgO2eXBN-i8qNa-hil2SuMw=
      - WOODPECKER_AGENT_SECRET=agent-secret
      - WOODPECKER_HOST=http://10.8.0.53:8000

  woodpecker-agent:
    image: woodpeckerci/woodpecker-agent:v1.0.2
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
    environment:
      - WOODPECKER_SERVER=woodpecker-server:9000
      - WOODPECKER_AGENT_SECRET=agent-secret

volumes:
  woodpecker-data:

+ 11
- 2
build/docker-compose.yaml Просмотреть файл

@@ -5,7 +5,7 @@ services:
container_name: db
restart: always
ports:
- "127.0.0.1:5432:5432"
- "127.0.0.1:5432:5432"
environment:
- POSTGRES_USER=postgres
- POSTGRES_PASSWORD=postgres
@@ -92,12 +92,20 @@ services:
image: presense-server
container_name: presense-server
environment:
- VALKEY_URL=valkey:6379
- KAFKA_URL=kafka:29092
- DBHost=db
- DBUser=postgres
- DBPass=postgres
- DBName=postgres
- HTTPClientID=Fastapi
- ClientSecret=wojuoB7Z5xhlPFrF2lIxJSSdVHCApEgC
- HTTPUsername=core
- HTTPPassword=C0r3_us3r_Cr3d3nt14ls
- HTTPAudience=Fastapi
- HTTPADDR=0.0.0.0:1902
- CONFIG_PATH=/app/cmd/server/config.json
- API_BASE_URL=https://10.251.0.30:5050
- API_AUTH_URL=https://10.251.0.30:10002
ports:
- "127.0.0.1:1902:1902"
depends_on:
@@ -120,6 +128,7 @@ services:
- MQTT_HOST=192.168.1.101
- MQTT_USERNAME=user
- MQTT_PASSWORD=pass
- MQTT_CLIENT_ID=bridge
depends_on:
kafka-init:
condition: service_completed_successfully


+ 7
- 188
cmd/bridge/main.go Просмотреть файл

@@ -2,205 +2,24 @@ package main

import (
"context"
"encoding/json"
"fmt"
"log"
"log/slog"
"os/signal"
"strings"
"sync"
"syscall"
"time"

"github.com/AFASystems/presence/internal/pkg/common/appcontext"
"github.com/AFASystems/presence/internal/app/bridge"
"github.com/AFASystems/presence/internal/pkg/config"
"github.com/AFASystems/presence/internal/pkg/kafkaclient"
"github.com/AFASystems/presence/internal/pkg/logger"
"github.com/AFASystems/presence/internal/pkg/model"
mqtt "github.com/eclipse/paho.mqtt.golang"
"github.com/google/uuid"
"github.com/segmentio/kafka-go"
)

var wg sync.WaitGroup

func mqtthandler(writer *kafka.Writer, topic string, message []byte, appState *appcontext.AppState) {
hostname := strings.Split(topic, "/")[1]
msgStr := string(message)

if strings.HasPrefix(msgStr, "[") {
var readings []model.RawReading
err := json.Unmarshal(message, &readings)
if err != nil {
log.Printf("Error parsing JSON: %v", err)
return
}

for _, reading := range readings {
if reading.Type == "Gateway" {
continue
}

val, ok := appState.BeaconExists(reading.MAC)
// fmt.Printf("reading: %+v\n", reading)
if !ok {
continue
}

adv := model.BeaconAdvertisement{
ID: val,
Hostname: hostname,
MAC: reading.MAC,
RSSI: int64(reading.RSSI),
Data: reading.RawData,
}

encodedMsg, err := json.Marshal(adv)
if err != nil {
fmt.Println("Error in marshaling: ", err)
break
}

msg := kafka.Message{
Value: encodedMsg,
}

err = writer.WriteMessages(context.Background(), msg)
if err != nil {
fmt.Println("Error in writing to Kafka: ", err)
time.Sleep(1 * time.Second)
break
}
}
} else {
s := strings.Split(string(message), ",")
if len(s) < 6 {
log.Printf("Messaggio CSV non valido: %s", msgStr)
return
}

fmt.Println("this gateway is also sending data: ", s)
}
}

var messagePubHandler = func(msg mqtt.Message, writer *kafka.Writer, appState *appcontext.AppState) {
mqtthandler(writer, msg.Topic(), msg.Payload(), appState)
}

var connectHandler mqtt.OnConnectHandler = func(client mqtt.Client) {
fmt.Println("Connected")
}

var connectLostHandler mqtt.ConnectionLostHandler = func(client mqtt.Client, err error) {
fmt.Printf("Connect lost: %v", err)
}

func main() {
// Load global context to init beacons and latest list
appState := appcontext.NewAppState()
cfg := config.Load()
kafkaManager := kafkaclient.InitKafkaManager()

// Set logger -> terminal and log file
slog.SetDefault(logger.CreateLogger("bridge.log"))

// define context
ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGTERM, syscall.SIGINT)
defer stop()

readerTopics := []string{"apibeacons", "alert", "mqtt"}
kafkaManager.PopulateKafkaManager(cfg.KafkaURL, "bridge", readerTopics)

writerTopics := []string{"rawbeacons"}
kafkaManager.PopulateKafkaManager(cfg.KafkaURL, "", writerTopics)

slog.Info("Bridge initialized, subscribed to kafka topics")

chApi := make(chan model.ApiUpdate, 200)
chAlert := make(chan model.Alert, 200)
chMqtt := make(chan []model.Tracker, 200)

wg.Add(3)
go kafkaclient.Consume(kafkaManager.GetReader("apibeacons"), chApi, ctx, &wg)
go kafkaclient.Consume(kafkaManager.GetReader("alert"), chAlert, ctx, &wg)
go kafkaclient.Consume(kafkaManager.GetReader("mqtt"), chMqtt, ctx, &wg)

opts := mqtt.NewClientOptions()
opts.AddBroker(fmt.Sprintf("tcp://%s:%d", cfg.MQTTHost, 1883))

cId := fmt.Sprintf("bridge-%s", uuid.New().String())

opts.SetClientID(cId)
opts.SetAutoReconnect(true)
opts.SetConnectRetry(true)
opts.SetConnectRetryInterval(1 * time.Second)
opts.SetMaxReconnectInterval(600 * time.Second)
opts.SetCleanSession(false)

opts.SetDefaultPublishHandler(func(c mqtt.Client, m mqtt.Message) {
messagePubHandler(m, kafkaManager.GetWriter("rawbeacons"), appState)
})
opts.OnConnect = connectHandler
opts.OnConnectionLost = connectLostHandler
client := mqtt.NewClient(opts)
if token := client.Connect(); token.Wait() && token.Error() != nil {
panic(token.Error())
}

sub(client)

eventloop:
for {
select {
case <-ctx.Done():
break eventloop
case msg := <-chApi:
switch msg.Method {
case "POST":
id := msg.ID
appState.AddBeaconToLookup(msg.MAC, id)
lMsg := fmt.Sprintf("Beacon added to lookup: %s", id)
slog.Info(lMsg)
case "DELETE":
id := msg.MAC
if id == "all" {
appState.CleanLookup()
fmt.Println("cleaned up lookup map")
continue
}
appState.RemoveBeaconFromLookup(id)
lMsg := fmt.Sprintf("Beacon removed from lookup: %s", id)
slog.Info(lMsg)
}
case msg := <-chAlert:
p, err := json.Marshal(msg)
if err != nil {
continue
}
client.Publish("/alerts", 0, true, p)
case msg := <-chMqtt:
p, err := json.Marshal(msg)
if err != nil {
continue
}
client.Publish("/trackers", 0, true, p)
}
cfg := config.LoadBridge()
app, err := bridge.New(cfg)
if err != nil {
log.Fatalf("bridge: %v", err)
}

slog.Info("broken out of the main event loop")
wg.Wait()

slog.Info("All go routines have stopped, Beggining to close Kafka connections")
kafkaManager.CleanKafkaReaders()
kafkaManager.CleanKafkaWriters()

client.Disconnect(250)
slog.Info("Closing connection to MQTT broker")
}

func sub(client mqtt.Client) {
topic := "publish_out/#"
token := client.Subscribe(topic, 1, nil)
token.Wait()
fmt.Printf("Subscribed to topic: %s\n", topic)
app.Run(ctx)
app.Shutdown()
}

+ 7
- 118
cmd/decoder/main.go Просмотреть файл

@@ -1,136 +1,25 @@
package main

import (
"bytes"
"context"
"encoding/hex"
"fmt"
"log/slog"
"log"
"os/signal"
"strings"
"sync"
"syscall"

"github.com/AFASystems/presence/internal/pkg/common/appcontext"
"github.com/AFASystems/presence/internal/pkg/common/utils"
"github.com/AFASystems/presence/internal/app/decoder"
"github.com/AFASystems/presence/internal/pkg/config"
"github.com/AFASystems/presence/internal/pkg/kafkaclient"
"github.com/AFASystems/presence/internal/pkg/logger"
"github.com/AFASystems/presence/internal/pkg/model"
"github.com/segmentio/kafka-go"
)

var wg sync.WaitGroup

// main wires up the decoder service: it consumes raw BLE advertisements and
// parser-configuration updates from Kafka, decodes advertisements with the
// currently registered parsers, and publishes decoded events to the
// "alertbeacons" topic until SIGTERM/SIGINT cancels the context.
func main() {
	// Load global context to init beacons and latest list
	appState := appcontext.NewAppState()
	cfg := config.Load()
	kafkaManager := kafkaclient.InitKafkaManager()

	// Registry of beacon parsers; mutated at runtime via the "parser" topic.
	parserRegistry := model.ParserRegistry{
		ParserList: make(map[string]model.BeaconParser),
	}

	// Set logger -> terminal and log file
	slog.SetDefault(logger.CreateLogger("decoder.log"))

	// Context is cancelled on SIGTERM/SIGINT, which unwinds the event loop below.
	ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGTERM, syscall.SIGINT)
	defer stop()

	readerTopics := []string{"rawbeacons", "parser"}
	kafkaManager.PopulateKafkaManager(cfg.KafkaURL, "decoder", readerTopics)

	writerTopics := []string{"alertbeacons"}
	kafkaManager.PopulateKafkaManager(cfg.KafkaURL, "", writerTopics)

	slog.Info("Decoder initialized, subscribed to Kafka topics")

	chRaw := make(chan model.BeaconAdvertisement, 2000)
	chParser := make(chan model.KafkaParser, 200)

	// BUG FIX: exactly two consumer goroutines are started, so the WaitGroup
	// counter must be 2. The previous wg.Add(3) left the counter one too high,
	// making wg.Wait() below block forever and preventing a clean shutdown.
	wg.Add(2)
	go kafkaclient.Consume(kafkaManager.GetReader("rawbeacons"), chRaw, ctx, &wg)
	go kafkaclient.Consume(kafkaManager.GetReader("parser"), chParser, ctx, &wg)

eventloop:
	for {
		select {
		case <-ctx.Done():
			break eventloop
		case msg := <-chRaw:
			processIncoming(msg, appState, kafkaManager.GetWriter("alertbeacons"), &parserRegistry)
		case msg := <-chParser:
			switch msg.ID {
			case "add", "update":
				// "add" and "update" performed the identical registration,
				// so they share one case.
				config := msg.Config
				parserRegistry.Register(config.Name, config)
			case "delete":
				parserRegistry.Unregister(msg.Name)
			}
		}
	}

	slog.Info("broken out of the main event loop")
	wg.Wait()

	slog.Info("All go routines have stopped, Beggining to close Kafka connections")
	kafkaManager.CleanKafkaReaders()
	kafkaManager.CleanKafkaWriters()
}

// processIncoming decodes a single raw beacon advertisement via decodeBeacon.
// Decoding failures are reported to stdout and swallowed on purpose: one bad
// advertisement must not stop the consumer loop that calls this function.
func processIncoming(adv model.BeaconAdvertisement, appState *appcontext.AppState, writer *kafka.Writer, parserRegistry *model.ParserRegistry) {
	// Fold the former Sprintf+Println pair into one Printf and drop the
	// redundant trailing return; the printed output is unchanged.
	if err := decodeBeacon(adv, appState, writer, parserRegistry); err != nil {
		fmt.Printf("Error in decoding: %v\n", err)
	}
}

func decodeBeacon(adv model.BeaconAdvertisement, appState *appcontext.AppState, writer *kafka.Writer, parserRegistry *model.ParserRegistry) error {
beacon := strings.TrimSpace(adv.Data)
id := adv.ID
if beacon == "" {
return nil
}

b, err := hex.DecodeString(beacon)
cfg := config.LoadDecoder()
app, err := decoder.New(cfg)
if err != nil {
return err
}

b = utils.RemoveFlagBytes(b)

indeces := utils.ParseADFast(b)
event := utils.LoopADStructures(b, indeces, id, parserRegistry)

if event.ID == "" {
return nil
}
prevEvent, ok := appState.GetBeaconEvent(id)
appState.UpdateBeaconEvent(id, event)

if event.Type == "iBeacon" {
event.BtnPressed = true
}

if ok && bytes.Equal(prevEvent.Hash(), event.Hash()) {
return nil
}

eMsg, err := event.ToJSON()
if err != nil {
return err
}

if err := writer.WriteMessages(context.Background(), kafka.Message{Value: eMsg}); err != nil {
return err
log.Fatalf("decoder: %v", err)
}

return nil
app.Run(ctx)
app.Shutdown()
}

+ 8
- 187
cmd/location/main.go Просмотреть файл

@@ -2,203 +2,24 @@ package main

import (
"context"
"encoding/json"
"fmt"
"log/slog"
"log"
"os/signal"
"sync"
"syscall"
"time"

"github.com/AFASystems/presence/internal/pkg/common/appcontext"
"github.com/AFASystems/presence/internal/pkg/common/utils"
"github.com/AFASystems/presence/internal/app/location"
"github.com/AFASystems/presence/internal/pkg/config"
"github.com/AFASystems/presence/internal/pkg/kafkaclient"
"github.com/AFASystems/presence/internal/pkg/logger"
"github.com/AFASystems/presence/internal/pkg/model"
"github.com/segmentio/kafka-go"
)

var wg sync.WaitGroup

// main wires up the location service: it consumes raw BLE advertisements and
// settings updates from Kafka, and once per second runs the configured
// location algorithm, publishing results to the "locevents" topic until
// SIGTERM/SIGINT cancels the context.
func main() {
	// Load global context to init beacons and latest list
	appState := appcontext.NewAppState()
	cfg := config.Load()
	kafkaManager := kafkaclient.InitKafkaManager()

	// Set logger -> terminal and log file
	slog.SetDefault(logger.CreateLogger("location.log"))

	// Context is cancelled on SIGTERM/SIGINT, which unwinds the event loop below.
	ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGTERM, syscall.SIGINT)
	defer stop()

	readerTopics := []string{"rawbeacons", "settings"}
	kafkaManager.PopulateKafkaManager(cfg.KafkaURL, "location", readerTopics)

	writerTopics := []string{"locevents"}
	kafkaManager.PopulateKafkaManager(cfg.KafkaURL, "", writerTopics)

	slog.Info("Locations algorithm initialized, subscribed to Kafka topics")

	// Drives one location-estimation pass per second.
	locTicker := time.NewTicker(1 * time.Second)
	defer locTicker.Stop()

	chRaw := make(chan model.BeaconAdvertisement, 2000)
	chSettings := make(chan map[string]any, 5)

	// BUG FIX: exactly two consumer goroutines are started, so the WaitGroup
	// counter must be 2. The previous wg.Add(3) left the counter one too high,
	// making wg.Wait() below block forever and preventing a clean shutdown.
	wg.Add(2)
	go kafkaclient.Consume(kafkaManager.GetReader("rawbeacons"), chRaw, ctx, &wg)
	go kafkaclient.Consume(kafkaManager.GetReader("settings"), chSettings, ctx, &wg)

eventLoop:
	for {
		select {
		case <-ctx.Done():
			break eventLoop
		case <-locTicker.C:
			settings := appState.GetSettings()
			// NOTE(review): prints once per second — consider demoting to
			// debug-level logging.
			fmt.Printf("Settings: %+v\n", settings)
			switch settings.CurrentAlgorithm {
			case "filter":
				getLikelyLocations(appState, kafkaManager.GetWriter("locevents"))
			case "ai":
				fmt.Println("AI algorithm selected")
			}
		case msg := <-chRaw:
			assignBeaconToList(msg, appState)
		case msg := <-chSettings:
			fmt.Printf("settings msg: %+v\n", msg)
			appState.UpdateSettings(msg)
		}
	}

	slog.Info("broken out of the main event loop")
	wg.Wait()

	slog.Info("All go routines have stopped, Beggining to close Kafka connections")
	kafkaManager.CleanKafkaReaders()
	kafkaManager.CleanKafkaWriters()
}

// getLikelyLocations scores each tracked beacon's recent metrics per gateway
// location, picks the highest-scoring location, tracks a confidence counter
// across calls, persists the updated beacon into appState, and publishes an
// HTTPLocation record to the given Kafka writer for every scored beacon.
// Beacons with no metrics, or whose newest metric is older than
// settings.LastSeenThreshold, are skipped.
func getLikelyLocations(appState *appcontext.AppState, writer *kafka.Writer) {
	beacons := appState.GetAllBeacons()
	settings := appState.GetSettingsValue()

	for _, beacon := range beacons {
		// Shrinking the model because other properties have nothing to do with the location
		// Distance/LastSeen start at 999 as sentinel values; they are
		// overwritten below before publishing.
		r := model.HTTPLocation{
			Method:   "Standard",
			Distance: 999,
			ID:       beacon.ID,
			Location: "",
			LastSeen: 999,
		}

		mSize := len(beacon.BeaconMetrics)

		if mSize == 0 {
			continue
		}

		// Skip beacons whose newest metric is past the staleness threshold.
		if (int64(time.Now().Unix()) - (beacon.BeaconMetrics[mSize-1].Timestamp)) > settings.LastSeenThreshold {
			slog.Warn("beacon is too old")
			continue
		}

		// Score every location that heard this beacon: each sighting adds a
		// fixed weight (seenW) plus an RSSI-derived bonus (rssiW scaled so a
		// stronger signal, i.e. RSSI closer to 0, scores higher; -100 dBm
		// appears to be treated as the weakest usable signal).
		locList := make(map[string]float64)
		seenW := 1.5
		rssiW := 0.75
		for _, metric := range beacon.BeaconMetrics {
			res := seenW + (rssiW * (1.0 - (float64(metric.RSSI) / -100.0)))
			locList[metric.Location] += res
		}

		// Pick the location with the highest accumulated score.
		bestLocName := ""
		maxScore := 0.0
		for locName, score := range locList {
			if score > maxScore {
				maxScore = score
				bestLocName = locName
			}
		}

		// Confidence counts consecutive passes with the same winning location;
		// any change resets it to zero.
		if bestLocName == beacon.PreviousLocation {
			beacon.LocationConfidence++
		} else {
			beacon.LocationConfidence = 0
		}

		// Publish the newest metric's readings alongside the chosen location.
		r.Distance = beacon.BeaconMetrics[mSize-1].Distance
		r.Location = bestLocName
		r.LastSeen = beacon.BeaconMetrics[mSize-1].Timestamp
		r.RSSI = beacon.BeaconMetrics[mSize-1].RSSI

		// When confidence reaches the configured threshold for a location
		// different from the last confident one, reset the counter.
		// NOTE(review): PreviousConfidentLocation is never assigned here —
		// presumably it is updated elsewhere; verify against other services.
		if beacon.LocationConfidence == settings.LocationConfidence && beacon.PreviousConfidentLocation != bestLocName {
			beacon.LocationConfidence = 0
		}

		// Persist the mutated copy back into shared state (beacon is a copy
		// taken from the range loop, so UpdateBeacon is what makes the
		// confidence/location changes stick).
		beacon.PreviousLocation = bestLocName
		appState.UpdateBeacon(beacon.ID, beacon)

		js, err := json.Marshal(r)
		if err != nil {
			eMsg := fmt.Sprintf("Error in marshaling location: %v", err)
			slog.Error(eMsg)
			continue
		}

		msg := kafka.Message{
			Value: js,
		}

		// Publish errors are logged but do not abort the remaining beacons.
		err = writer.WriteMessages(context.Background(), msg)
		if err != nil {
			eMsg := fmt.Sprintf("Error in sending Kafka message: %v", err)
			slog.Error(eMsg)
		}
	}
}

func assignBeaconToList(adv model.BeaconAdvertisement, appState *appcontext.AppState) {
id := adv.ID
now := time.Now().Unix()

settings := appState.GetSettingsValue()

if settings.RSSIEnforceThreshold && (int64(adv.RSSI) < settings.RSSIMinThreshold) {
slog.Info("Settings returns")
return
}

beacon, ok := appState.GetBeacon(id)
if !ok {
beacon = model.Beacon{
ID: id,
}
}

beacon.IncomingJSON = adv
beacon.LastSeen = now

if beacon.BeaconMetrics == nil {
beacon.BeaconMetrics = make([]model.BeaconMetric, 0, settings.BeaconMetricSize)
}

metric := model.BeaconMetric{
Distance: utils.CalculateDistance(adv),
Timestamp: now,
RSSI: int64(adv.RSSI),
Location: adv.Hostname,
}

if len(beacon.BeaconMetrics) >= settings.BeaconMetricSize {
copy(beacon.BeaconMetrics, beacon.BeaconMetrics[1:])
beacon.BeaconMetrics[settings.BeaconMetricSize-1] = metric
} else {
beacon.BeaconMetrics = append(beacon.BeaconMetrics, metric)
cfg := config.LoadLocation()
app, err := location.New(cfg)
if err != nil {
log.Fatalf("location: %v", err)
}

appState.UpdateBeacon(id, beacon)
app.Run(ctx)
app.Shutdown()
}

+ 8
- 179
cmd/server/main.go Просмотреть файл

@@ -2,198 +2,27 @@ package main

import (
"context"
"encoding/json"
"fmt"
"io"
"log"
"log/slog"
"net/http"
"os"
"os/signal"
"sync"
"syscall"
"time"

"github.com/AFASystems/presence/internal/pkg/apiclient"
"github.com/AFASystems/presence/internal/pkg/common/appcontext"
"github.com/AFASystems/presence/internal/app/server"
"github.com/AFASystems/presence/internal/pkg/config"
"github.com/AFASystems/presence/internal/pkg/controller"
"github.com/AFASystems/presence/internal/pkg/database"
"github.com/AFASystems/presence/internal/pkg/kafkaclient"
"github.com/AFASystems/presence/internal/pkg/logger"
"github.com/AFASystems/presence/internal/pkg/model"
"github.com/AFASystems/presence/internal/pkg/service"
"github.com/gorilla/handlers"
"github.com/gorilla/mux"
"github.com/segmentio/kafka-go"
)

var _ io.Writer = (*os.File)(nil)
var wg sync.WaitGroup

func main() {
cfg := config.Load()
appState := appcontext.NewAppState()
kafkaManager := kafkaclient.InitKafkaManager()

// Set logger -> terminal and log file
slog.SetDefault(logger.CreateLogger("server.log"))

// define context
ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGTERM, syscall.SIGINT)
defer stop()

db, err := database.Connect(cfg)
cfg := config.LoadServer()
app, err := server.New(cfg)
if err != nil {
log.Fatalf("Failed to open database connection: %v\n", err)
}

headersOk := handlers.AllowedHeaders([]string{"X-Requested-With"})
originsOk := handlers.AllowedOrigins([]string{"*"})
methodsOk := handlers.AllowedMethods([]string{"GET", "HEAD", "POST", "PUT", "DELETE", "OPTIONS"})

writerTopics := []string{"apibeacons", "alert", "mqtt", "settings", "parser"}
kafkaManager.PopulateKafkaManager(cfg.KafkaURL, "", writerTopics)

slog.Info("Kafka writers topics: apibeacons, settings initialized")

configFile, err := os.Open("/app/cmd/server/config.json")
if err != nil {
panic(err)
}

b, _ := io.ReadAll(configFile)

var configs []model.Config
json.Unmarshal(b, &configs)

for _, config := range configs {
// persist read configs in database
db.Create(&config)
}

db.Find(&configs)
for _, config := range configs {
kp := model.KafkaParser{
ID: "add",
Config: config,
}

if err := service.SendParserConfig(kp, kafkaManager.GetWriter("parser"), ctx); err != nil {
fmt.Printf("Unable to send parser config to kafka broker %v\n", err)
}
}

if err := apiclient.UpdateDB(db, ctx, cfg, kafkaManager.GetWriter("apibeacons"), appState); err != nil {
fmt.Printf("Error in getting token: %v\n", err)
}

readerTopics := []string{"locevents", "alertbeacons"}
kafkaManager.PopulateKafkaManager(cfg.KafkaURL, "server", readerTopics)
slog.Info("Kafka readers topics: locevents, alertbeacons initialized")

chLoc := make(chan model.HTTPLocation, 200)
chEvents := make(chan model.BeaconEvent, 500)

wg.Add(2)
go kafkaclient.Consume(kafkaManager.GetReader("locevents"), chLoc, ctx, &wg)
go kafkaclient.Consume(kafkaManager.GetReader("alertbeacons"), chEvents, ctx, &wg)

r := mux.NewRouter()

r.HandleFunc("/reslevis/getGateways", controller.GatewayListController(db)).Methods("GET")
r.HandleFunc("/reslevis/postGateway", controller.GatewayAddController(db)).Methods("POST")
r.HandleFunc("/reslevis/removeGateway/{id}", controller.GatewayDeleteController(db)).Methods("DELETE")
r.HandleFunc("/reslevis/updateGateway/{id}", controller.GatewayUpdateController(db)).Methods("PUT")

r.HandleFunc("/reslevis/getZones", controller.ZoneListController(db)).Methods("GET")
r.HandleFunc("/reslevis/postZone", controller.ZoneAddController(db)).Methods("POST")
r.HandleFunc("/reslevis/removeZone/{id}", controller.ZoneDeleteController(db)).Methods("DELETE")
r.HandleFunc("/reslevis/updateZone", controller.ZoneUpdateController(db)).Methods("PUT")

r.HandleFunc("/reslevis/getTrackerZones", controller.TrackerZoneListController(db)).Methods("GET")
r.HandleFunc("/reslevis/postTrackerZone", controller.TrackerZoneAddController(db)).Methods("POST")
r.HandleFunc("/reslevis/removeTrackerZone/{id}", controller.TrackerZoneDeleteController(db)).Methods("DELETE")
r.HandleFunc("/reslevis/updateTrackerZone", controller.TrackerZoneUpdateController(db)).Methods("PUT")

r.HandleFunc("/reslevis/getTrackers", controller.TrackerList(db)).Methods("GET")
r.HandleFunc("/reslevis/postTracker", controller.TrackerAdd(db, kafkaManager.GetWriter("apibeacons"), ctx)).Methods("POST")
r.HandleFunc("/reslevis/removeTracker/{id}", controller.TrackerDelete(db, kafkaManager.GetWriter("apibeacons"), ctx)).Methods("DELETE")
r.HandleFunc("/reslevis/updateTracker", controller.TrackerUpdate(db)).Methods("PUT")

r.HandleFunc("/configs/beacons", controller.ParserListController(db)).Methods("GET")
r.HandleFunc("/configs/beacons", controller.ParserAddController(db, kafkaManager.GetWriter("parser"), ctx)).Methods("POST")
r.HandleFunc("/configs/beacons/{id}", controller.ParserUpdateController(db, kafkaManager.GetWriter("parser"), ctx)).Methods("PUT")
r.HandleFunc("/configs/beacons/{id}", controller.ParserDeleteController(db, kafkaManager.GetWriter("parser"), ctx)).Methods("DELETE")

r.HandleFunc("/reslevis/settings", controller.SettingsUpdateController(db, kafkaManager.GetWriter("settings"), ctx)).Methods("PATCH")
r.HandleFunc("/reslevis/settings", controller.SettingsListController(db)).Methods("GET")

r.HandleFunc("/reslevis/getTracks/{id}", controller.TracksListController(db)).Methods("GET")

beaconTicker := time.NewTicker(2 * time.Second)

restApiHandler := handlers.CORS(originsOk, headersOk, methodsOk)(r)
mainHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
restApiHandler.ServeHTTP(w, r)
})

server := http.Server{
Addr: cfg.HTTPAddr,
Handler: mainHandler,
}

go server.ListenAndServe()

eventLoop:
for {
select {
case <-ctx.Done():
break eventLoop
case msg := <-chLoc:
service.LocationToBeaconService(msg, db, kafkaManager.GetWriter("alert"), ctx)
case msg := <-chEvents:
fmt.Printf("event: %+v\n", msg)
id := msg.ID
if err := db.First(&model.Tracker{}, "id = ?", id).Error; err != nil {
fmt.Printf("Decoder event for untracked beacon: %s\n", id)
continue
}

if err := db.Updates(&model.Tracker{ID: id, Battery: msg.Battery, Temperature: msg.Temperature}).Error; err != nil {
fmt.Printf("Error in saving decoder event for beacon: %s\n", id)
continue
}
case <-beaconTicker.C:
var list []model.Tracker
db.Find(&list)
eMsg, err := json.Marshal(list)
if err != nil {
fmt.Printf("Error in marshaling trackers list: %v\n", err)
continue
}

msg := kafka.Message{
Value: eMsg,
}

kafkaManager.GetWriter("mqtt").WriteMessages(ctx, msg)
}
log.Fatalf("server: %v", err)
}

if err := server.Shutdown(context.Background()); err != nil {
eMsg := fmt.Sprintf("could not shutdown: %v\n", err)
slog.Error(eMsg)
if err := app.Init(ctx); err != nil {
log.Fatalf("server init: %v", err)
}

slog.Info("API SERVER: \n")
slog.Warn("broken out of the main event loop and HTTP server shutdown\n")
wg.Wait()

slog.Info("All go routines have stopped, Beggining to close Kafka connections\n")
kafkaManager.CleanKafkaReaders()
kafkaManager.CleanKafkaWriters()

slog.Info("All kafka clients shutdown, starting shutdown of valkey client")
slog.Info("API server shutting down")
app.Run(ctx)
app.Shutdown()
}

+ 0
- 748
docs/API.md Просмотреть файл

@@ -1,748 +0,0 @@
# API Documentation

## Overview

The AFA Systems Presence Detection API provides RESTful endpoints for managing beacons, settings, and real-time WebSocket communication for live updates.

## Base URL

```
http://localhost:8080
```

## Authentication

Currently, the API does not implement authentication. This should be added for production deployments.

## REST API Endpoints

### Beacon Management

#### Get All Beacons
Retrieves a list of all registered beacons with their current status and location information.

```http
GET /api/beacons
```

**Response:**
```json
{
"beacons": [
{
"name": "Conference Room Beacon",
"beacon_id": "beacon_001",
"beacon_type": "ingics",
"beacon_location": "conference_room",
"last_seen": 1703078400,
"distance": 2.5,
"location_confidence": 85,
"hs_button_counter": 42,
"hs_button_battery": 85,
"hs_button_random": "abc123",
"hs_button_mode": "normal"
}
]
}
```

#### Create Beacon
Registers a new beacon in the system.

```http
POST /api/beacons
Content-Type: application/json
```

**Request Body:**
```json
{
"name": "Meeting Room Beacon",
"beacon_id": "beacon_002",
"beacon_type": "eddystone",
"beacon_location": "meeting_room",
"hs_button_counter": 0,
"hs_button_battery": 100
}
```

**Response:**
```json
{
"message": "Beacon created successfully",
"beacon": {
"name": "Meeting Room Beacon",
"beacon_id": "beacon_002",
"beacon_type": "eddystone",
"beacon_location": "meeting_room",
"last_seen": 0,
"distance": 0,
"location_confidence": 0,
"hs_button_counter": 0,
"hs_button_battery": 100
}
}
```

#### Update Beacon
Updates an existing beacon's information.

```http
PUT /api/beacons/{id}
Content-Type: application/json
```

**Path Parameters:**
- `id` (string): The beacon ID to update

**Request Body:**
```json
{
"name": "Updated Conference Room Beacon",
"beacon_location": "main_conference",
"location_confidence": 90
}
```

**Response:**
```json
{
"message": "Beacon updated successfully",
"beacon": {
"name": "Updated Conference Room Beacon",
"beacon_id": "beacon_001",
"beacon_type": "ingics",
"beacon_location": "main_conference",
"last_seen": 1703078400,
"distance": 2.5,
"location_confidence": 90,
"hs_button_counter": 42,
"hs_button_battery": 85
}
}
```

#### Delete Beacon
Removes a beacon from the system.

```http
DELETE /api/beacons/{id}
```

**Path Parameters:**
- `id` (string): The beacon ID to delete

**Response:**
```json
{
"message": "Beacon deleted successfully"
}
```

### Settings Management

#### Get System Settings
Retrieves current system configuration settings.

```http
GET /api/settings
```

**Response:**
```json
{
"settings": {
"location_confidence": 80,
"last_seen_threshold": 300,
"beacon_metrics_size": 10,
"ha_send_interval": 60,
"ha_send_changes_only": true,
"rssi_min_threshold": -90,
"enforce_rssi_threshold": true
}
}
```

#### Update System Settings
Updates system configuration settings.

```http
POST /api/settings
Content-Type: application/json
```

**Request Body:**
```json
{
"location_confidence": 85,
"last_seen_threshold": 600,
"beacon_metrics_size": 15,
"ha_send_interval": 30,
"rssi_min_threshold": -85
}
```

**Response:**
```json
{
"message": "Settings updated successfully",
"settings": {
"location_confidence": 85,
"last_seen_threshold": 600,
"beacon_metrics_size": 15,
"ha_send_interval": 30,
"ha_send_changes_only": true,
"rssi_min_threshold": -85,
"enforce_rssi_threshold": true
}
}
```

### Location Information

#### Get Beacon Locations
Retrieves current location information for all beacons.

```http
GET /api/locations
```

**Response:**
```json
{
"beacons": [
{
"method": "location_update",
"previous_confident_location": "reception",
"distance": 3.2,
"id": "beacon_001",
"location": "conference_room",
"last_seen": 1703078450
},
{
"method": "location_update",
"previous_confident_location": "office_a",
"distance": 1.8,
"id": "beacon_002",
"location": "meeting_room",
"last_seen": 1703078440
}
]
}
```

#### Get Specific Beacon Location
Retrieves location information for a specific beacon.

```http
GET /api/locations/{id}
```

**Path Parameters:**
- `id` (string): The beacon ID

**Response:**
```json
{
"method": "location_update",
"previous_confident_location": "reception",
"distance": 3.2,
"id": "beacon_001",
"location": "conference_room",
"last_seen": 1703078450
}
```

### Health Check

#### System Health
Check if the API server is running and basic systems are operational.

```http
GET /api/health
```

**Response:**
```json
{
"status": "healthy",
"timestamp": "2024-12-20T10:30:00Z",
"services": {
"database": "connected",
"kafka": "connected",
"redis": "connected"
}
}
```

## WebSocket API

### WebSocket Connection
Connect to the WebSocket endpoint for real-time updates.

```
ws://localhost:8080/ws/broadcast
```

### WebSocket Message Format

#### Beacon Update Notification
```json
{
"type": "beacon_update",
"data": {
"method": "location_update",
"beacon_info": {
"name": "Conference Room Beacon",
"beacon_id": "beacon_001",
"beacon_type": "ingics",
"distance": 2.5
},
"name": "conference_room",
"beacon_name": "Conference Room Beacon",
"previous_location": "reception",
"new_location": "conference_room",
"timestamp": 1703078450
}
}
```

#### Button Press Event
```json
{
"type": "button_event",
"data": {
"beacon_id": "beacon_001",
"button_counter": 43,
"button_mode": "normal",
"timestamp": 1703078460
}
}
```

#### Battery Alert
```json
{
"type": "battery_alert",
"data": {
"beacon_id": "beacon_002",
"battery_level": 15,
"alert_level": "warning",
"timestamp": 1703078470
}
}
```

#### Fall Detection Event
```json
{
"type": "fall_detection",
"data": {
"beacon_id": "beacon_001",
"event_type": "fall_detected",
"confidence": 92,
"timestamp": 1703078480
}
}
```

#### System Status Update
```json
{
"type": "system_status",
"data": {
"active_beacons": 12,
"total_locations": 8,
"kafka_status": "connected",
"redis_status": "connected",
"timestamp": 1703078490
}
}
```

## Data Models

### Beacon Model
```typescript
interface Beacon {
name: string;
beacon_id: string;
beacon_type: "ingics" | "eddystone" | "minew_b7" | "ibeacon";
beacon_location: string;
last_seen: number; // Unix timestamp
distance: number; // Distance in meters
previous_location?: string;
previous_confident_location?: string;
expired_location?: string;
location_confidence: number; // 0-100
location_history: string[];
beacon_metrics: BeaconMetric[];

// Handshake/Button specific fields
hs_button_counter: number;
hs_button_prev: number;
hs_button_battery: number;
hs_button_random: string;
hs_button_mode: string;
}
```

### BeaconMetric Model
```typescript
interface BeaconMetric {
location: string;
distance: number;
rssi: number;
timestamp: number;
}
```

### Settings Model
```typescript
interface Settings {
location_confidence: number; // Minimum confidence level (0-100)
last_seen_threshold: number; // Seconds before beacon considered offline
beacon_metrics_size: number; // Number of RSSI measurements to keep
ha_send_interval: number; // Home Assistant update interval (seconds)
ha_send_changes_only: boolean; // Only send updates on changes
rssi_min_threshold: number; // Minimum RSSI for detection
enforce_rssi_threshold: boolean; // Filter weak signals
}
```

### LocationChange Model
```typescript
interface LocationChange {
method: string; // "location_update" | "beacon_added" | "beacon_removed"
beacon_ref: Beacon; // Complete beacon information
name: string; // Beacon name
beacon_name: string; // Beacon name (duplicate)
previous_location: string; // Previous location
new_location: string; // New location
timestamp: number; // Unix timestamp
}
```

## Error Responses

### Standard Error Format
```json
{
"error": {
"code": "VALIDATION_ERROR",
"message": "Invalid request data",
"details": {
"field": "beacon_id",
"reason": "Beacon ID is required"
}
}
}
```

### Common Error Codes

| Code | HTTP Status | Description |
|------|-------------|-------------|
| `VALIDATION_ERROR` | 400 | Request data validation failed |
| `NOT_FOUND` | 404 | Resource not found |
| `CONFLICT` | 409 | Resource already exists |
| `INTERNAL_ERROR` | 500 | Internal server error |
| `SERVICE_UNAVAILABLE` | 503 | Required service is unavailable |

### Validation Error Example
```http
POST /api/beacons
Content-Type: application/json
```

**Invalid Request:**
```json
{
"name": "",
"beacon_type": "invalid_type"
}
```

**Response:**
```json
{
"error": {
"code": "VALIDATION_ERROR",
"message": "Invalid request data",
"details": {
"name": "Name cannot be empty",
"beacon_type": "Invalid beacon type. Must be one of: ingics, eddystone, minew_b7, ibeacon"
}
}
}
```

## Rate Limiting

Currently, the API does not implement rate limiting. Consider implementing rate limiting for production deployments:

- Suggested limits: 100 requests per minute per IP address
- WebSocket connections: Maximum 50 concurrent connections
- Consider authentication-based rate limiting

## CORS Configuration

The API server is configured with CORS enabled for development. Production deployments should restrict CORS origins to specific domains.

## Integration Examples

### JavaScript/TypeScript Client

```typescript
class PresenceAPIClient {
private baseURL: string;

constructor(baseURL: string = 'http://localhost:8080') {
this.baseURL = baseURL;
}

async getBeacons(): Promise<Beacon[]> {
const response = await fetch(`${this.baseURL}/api/beacons`);
if (!response.ok) {
throw new Error(`HTTP error! status: ${response.status}`);
}
const data = await response.json();
return data.beacons;
}

async createBeacon(beacon: Partial<Beacon>): Promise<Beacon> {
const response = await fetch(`${this.baseURL}/api/beacons`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify(beacon),
});

if (!response.ok) {
const error = await response.json();
throw new Error(error.error?.message || 'Failed to create beacon');
}

const data = await response.json();
return data.beacon;
}

connectWebSocket(onMessage: (message: any) => void): WebSocket {
const ws = new WebSocket(`${this.baseURL.replace('http', 'ws')}/ws/broadcast`);

ws.onmessage = (event) => {
try {
const message = JSON.parse(event.data);
onMessage(message);
} catch (error) {
console.error('Failed to parse WebSocket message:', error);
}
};

ws.onerror = (error) => {
console.error('WebSocket error:', error);
};

ws.onclose = () => {
console.log('WebSocket connection closed');
};

return ws;
}
}

// Usage example
const client = new PresenceAPIClient();

// Get all beacons
const beacons = await client.getBeacons();
console.log('Active beacons:', beacons);

// Create a new beacon
const newBeacon = await client.createBeacon({
name: 'Test Beacon',
beacon_id: 'test_beacon_001',
beacon_type: 'eddystone',
beacon_location: 'test_room'
});

// Connect to WebSocket for real-time updates
const ws = client.connectWebSocket((message) => {
switch (message.type) {
case 'beacon_update':
console.log('Beacon location updated:', message.data);
break;
case 'button_event':
console.log('Button pressed:', message.data);
break;
case 'battery_alert':
console.log('Low battery warning:', message.data);
break;
}
});
```

### Python Client

```python
import requests
import websocket
import json
from typing import List, Dict, Any

class PresenceAPIClient:
def __init__(self, base_url: str = "http://localhost:8080"):
self.base_url = base_url
self.ws_url = base_url.replace("http", "ws")

def get_beacons(self) -> List[Dict[str, Any]]:
"""Get all registered beacons."""
response = requests.get(f"{self.base_url}/api/beacons")
response.raise_for_status()
data = response.json()
return data["beacons"]

def create_beacon(self, beacon_data: Dict[str, Any]) -> Dict[str, Any]:
"""Create a new beacon."""
response = requests.post(
f"{self.base_url}/api/beacons",
json=beacon_data
)
response.raise_for_status()
data = response.json()
return data["beacon"]

def update_beacon(self, beacon_id: str, beacon_data: Dict[str, Any]) -> Dict[str, Any]:
"""Update an existing beacon."""
response = requests.put(
f"{self.base_url}/api/beacons/{beacon_id}",
json=beacon_data
)
response.raise_for_status()
data = response.json()
return data["beacon"]

def delete_beacon(self, beacon_id: str) -> None:
"""Delete a beacon."""
response = requests.delete(f"{self.base_url}/api/beacons/{beacon_id}")
response.raise_for_status()

def get_settings(self) -> Dict[str, Any]:
"""Get system settings."""
response = requests.get(f"{self.base_url}/api/settings")
response.raise_for_status()
return response.json()["settings"]

def update_settings(self, settings_data: Dict[str, Any]) -> Dict[str, Any]:
"""Update system settings."""
response = requests.post(
f"{self.base_url}/api/settings",
json=settings_data
)
response.raise_for_status()
return response.json()["settings"]

# Usage example
client = PresenceAPIClient()

# Get all beacons
beacons = client.get_beacons()
print(f"Found {len(beacons)} beacons")

# Create a new beacon
new_beacon = client.create_beacon({
"name": "Python Test Beacon",
"beacon_id": "python_test_001",
"beacon_type": "eddystone",
"beacon_location": "python_room"
})
print(f"Created beacon: {new_beacon['name']}")

# Update settings
settings = client.update_settings({
"location_confidence": 85,
"ha_send_interval": 30
})
print(f"Updated settings: {settings}")
```

## Testing

### Unit Testing Example (Go)

```go
package api_test

import (
"bytes"
"encoding/json"
"net/http"
"net/http/httptest"
"testing"
"github.com/stretchr/testify/assert"
)

func TestGetBeacons(t *testing.T) {
// Setup test server
router := setupTestRouter()

req, _ := http.NewRequest("GET", "/api/beacons", nil)
w := httptest.NewRecorder()
router.ServeHTTP(w, req)

assert.Equal(t, http.StatusOK, w.Code)

var response map[string]interface{}
err := json.Unmarshal(w.Body.Bytes(), &response)
assert.NoError(t, err)
assert.Contains(t, response, "beacons")
}

func TestCreateBeacon(t *testing.T) {
router := setupTestRouter()

beaconData := map[string]interface{}{
"name": "Test Beacon",
"beacon_id": "test_001",
"beacon_type": "eddystone",
"beacon_location": "test_room",
}

jsonData, _ := json.Marshal(beaconData)
req, _ := http.NewRequest("POST", "/api/beacons", bytes.NewBuffer(jsonData))
req.Header.Set("Content-Type", "application/json")

w := httptest.NewRecorder()
router.ServeHTTP(w, req)

assert.Equal(t, http.StatusCreated, w.Code)

var response map[string]interface{}
err := json.Unmarshal(w.Body.Bytes(), &response)
assert.NoError(t, err)
assert.Contains(t, response, "beacon")
}
```

## Security Considerations

For production deployments, consider implementing:

1. **Authentication**: JWT tokens or API key authentication
2. **Authorization**: Role-based access control (RBAC)
3. **Rate Limiting**: Prevent API abuse
4. **Input Validation**: Comprehensive input sanitization
5. **HTTPS**: TLS encryption for all API communications
6. **CORS**: Restrict origins to trusted domains
7. **Logging**: Comprehensive audit logging
8. **Security Headers**: Implement security HTTP headers

## API Versioning

The current API is version 1. Future versions will be:

- Version 1: `/api/v1/...` (current, implied)
- Version 2: `/api/v2/...` (future breaking changes)

Backward compatibility will be maintained within major versions.

+ 0
- 1039
docs/DEPLOYMENT.md
Разница между файлами не показана из-за своего большого размера
Просмотреть файл


Двоичные данные
docs/Frame definition- B7,MWB01,MWC01.pdf Просмотреть файл


+ 0
- 9
docs/README.md Просмотреть файл

@@ -1,9 +0,0 @@
# `/docs`

Design and user documents (in addition to your godoc generated documentation).

Examples:

* https://github.com/gohugoio/hugo/tree/master/docs
* https://github.com/openshift/origin/tree/master/docs
* https://github.com/dapr/dapr/tree/master/docs

+ 319
- 0
docs/REFACTORING_OVERVIEW.md Просмотреть файл

@@ -0,0 +1,319 @@
# Microservices Refactoring Overview

This document provides an extended analysis of the four microservices (`server`, `location`, `bridge`, `decoder`), their current structure, and a concrete refactoring plan for better reusability, separation of concerns, and maintainability.

---

## 1. Extended Overview of Each Service

### 1.1 `cmd/server/main.go` (~211 lines)

**Role:** HTTP API + Kafka consumers + event loop. Central API for gateways, zones, trackers, parser configs, settings, and tracks; consumes location events and alert beacons; runs a ticker to publish tracker list to MQTT topic.

**What lives in `main()` today:**

| Section | Lines (approx) | Responsibility |
|--------|-----------------|----------------|
| Bootstrap | 36–46 | Load config, create AppState, init Kafka manager, create logger, signal context |
| DB + CORS | 48–55 | Connect DB, build CORS options |
| Kafka writers | 56–59 | Populate writers: apibeacons, alert, mqtt, settings, parser |
| Config load & DB seed | 60–89 | Open config file, unmarshal JSON, create configs in DB, find configs, send each to parser topic, call `UpdateDB` |
| Kafka readers | 96–105 | Populate readers: locevents, alertbeacons; create channels; start 2 consumer goroutines |
| Router setup | 107–136 | Mux router + ~25 route registrations (handlers get `db`, `writer`, `ctx` directly) |
| HTTP server | 138–150 | CORS wrapper, handler, `http.Server`, `ListenAndServe` in goroutine |
| **Event loop** | 154–191 | `select`: ctx.Done, chLoc → `LocationToBeaconService`, chEvents → update tracker battery/temp in DB, beaconTicker → marshal trackers, write to mqtt topic |
| Shutdown | 193–210 | Server shutdown, wait group, clean Kafka, cleanup logger |

**Pain points:**

- **Heavy main:** Config loading, DB seeding, and “sync parser configs to Kafka” are one-off startup logic mixed with wiring.
- **Event loop in main:** Business logic (location→beacon, decoder event→DB update, ticker→mqtt) lives in `main` instead of a dedicated component.
- **Handlers take 3–4 args:** `db`, `*kafka.Writer`, `context.Context` passed into every handler; no shared “server” or “app” struct.
- **Global `wg`:** Package-level `sync.WaitGroup` instead of a scoped lifecycle object.

---

### 1.2 `cmd/location/main.go` (~246 lines)

**Role:** Location algorithm service. Consumes raw beacons and settings from Kafka; on a ticker runs either “filter” (score-based) or “ai” (HTTP inference) and writes location events to Kafka.

**What lives in `main()` and adjacent:**

| Section | Lines (approx) | Responsibility |
|--------|-----------------|----------------|
| Bootstrap | 26–38 | AppState, config, Kafka manager, logger, signal context |
| Kafka | 39–54 | Readers: rawbeacons, settings; writer: locevents; channels; 2 consumer goroutines |
| **Event loop** | 56–90 | ctx.Done, locTicker (get settings, branch filter vs ai), chRaw → assignBeaconToList, chSettings → UpdateSettings |
| Shutdown | 92–100 | Break, wg.Wait, clean Kafka, cleanup |

**Logic outside main but still in this package:**

- `getAI` (102–122): HTTP client, TLS skip verify, get token, infer position (API calls).
- `getLikelyLocations` (124–203): Full “filter” algorithm: iterate beacons, score by RSSI/seen, confidence, write `HTTPLocation` to Kafka. ~80 lines.
- `assignBeaconToList` (205–244): Append metric to beacon in AppState, sliding window.

**Pain points:**

- **Algorithm in cmd:** `getLikelyLocations` and `assignBeaconToList` are core domain logic but live under `cmd/location`; not reusable or testable in isolation.
- **getAI in main pkg:** HTTP and TLS setup and API calls are in `main`; should be behind an interface (e.g. “LocationInference”) for testing and reuse.
- **Magic numbers:** `999`, `1.5`, `0.75`, etc. in `getLikelyLocations`; should be config or named constants.
- **Duplicate bootstrap:** Same Kafka/logger/context pattern as server and bridge.

---

### 1.3 `cmd/bridge/main.go` (~212 lines)

**Role:** MQTT ↔ Kafka bridge. Subscribes to MQTT; converts messages to Kafka (rawbeacons); consumes apibeacons, alert, mqtt from Kafka and publishes to MQTT.

**What lives in `main()` and adjacent:**

| Section | Lines (approx) | Responsibility |
|--------|-----------------|----------------|
| Bootstrap | 99–118 | AppState, config, Kafka, logger, context |
| Kafka | 112–127 | Readers: apibeacons, alert, mqtt; writer: rawbeacons; channels; 3 consumer goroutines |
| MQTT client | 129–150 | Options, client ID, handlers, connect, `sub(client)` |
| **Event loop** | 152–188 | ctx.Done, chApi (POST/DELETE → lookup), chAlert → Publish /alerts, chMqtt → Publish /trackers |
| Shutdown | 190–203 | Break, wg.Wait, Kafka cleanup, MQTT disconnect, cleanup |

**Logic in package:**

- `mqtthandler` (27–84): Parse JSON array of `RawReading` or CSV; for JSON, map MAC→ID via AppState, build `BeaconAdvertisement`, write to Kafka. CSV branch does nothing useful after parse (dead code).
- `messagePubHandler`, `connectHandler`, `connectLostHandler`: MQTT callbacks.
- `sub(client)`: Subscribe to `publish_out/#`.

**Pain points:**

- **MQTT and Kafka logic in cmd:** `mqtthandler` is central to the bridge but lives in `main.go`; hard to unit test and reuse.
- **Handler signature:** `mqtthandler(writer, topic, message, appState)` and package-level `messagePubHandler` close over `writer` and `appState`; no injectable “BridgeHandler” or service.
- **Topic parsing:** `strings.Split(topic, "/")[1]` can panic if topic format changes.
- **Dead CSV branch:** Parses CSV but never produces Kafka messages; either implement or remove.

---

### 1.4 `cmd/decoder/main.go` (~139 lines)

**Role:** Decode raw beacon payloads using a parser registry; consume rawbeacons and parser config updates; produce alertbeacons.

**What lives in `main()` and adjacent:**

| Section | Lines (approx) | Responsibility |
|--------|-----------------|----------------|
| Bootstrap | 25–55 | AppState, config, parser registry, logger, context, Kafka readers/writers, channels, 2 consumers |
| **Event loop** | 57–76 | ctx.Done, chRaw → processIncoming (decodeBeacon), chParser → add/delete/update registry |
| Shutdown | 78–86 | Break, wg.Wait, Kafka cleanup, cleanup |

**Logic in package:**

- `processIncoming` (88–95): Wraps `decodeBeacon`, logs errors.
- `decodeBeacon` (97–138): Hex decode, remove flags, parse AD structures, run parser registry, dedupe by event hash, write to alertbeacons. This is core decoder logic.

**Pain points:**

- **Decode logic in cmd:** `decodeBeacon` belongs in a `decoder` or `parser` service/package under `internal`, not in `cmd`.
- **Parser registry in main:** Registry is created and updated in main; could be a component that main wires and passes into a “DecoderService” or “EventProcessor”.

---

## 2. Cross-Cutting Observations

### 2.1 Duplication Across All Four `main.go`

- **Bootstrap:** Each service does: load service-specific config, create `AppState` (or not, server doesn’t use it for the same purpose), init `KafkaManager`, create logger, `signal.NotifyContext`.
- **Kafka pattern:** `PopulateKafkaManager` for readers/writers, create channels, `wg.Add(N)`, `go Consume(...)`.
- **Shutdown:** Break loop, `wg.Wait()`, `CleanKafkaReaders/Writers`, optional MQTT disconnect, logger cleanup.

This suggests a small **runtime/bootstrap** package that returns config, logger, Kafka manager, and context (and optionally an “App” struct that owns lifecycle).

### 2.2 Where Business Logic Lives

- **Server:** Event loop in main (location→beacon, decoder event→DB, ticker→mqtt); handlers in `internal/pkg/controller` but take raw `*gorm.DB` and `*kafka.Writer`.
- **Location:** Filter algorithm and “assign beacon to list” in `cmd/location`; no `internal` location or algorithm package.
- **Bridge:** MQTT message handling in `cmd/bridge`; no `internal` bridge or mqtt handler package.
- **Decoder:** Decode and registry handling in `cmd/decoder`; parser registry is in `model`, but “process raw → alert” is in main.

So today, a lot of “service logic” is either in `main` or in `cmd/<service>` instead of in `internal` behind clear interfaces.

### 2.3 Dependency Direction Today

- All `cmd/*` import from `internal/pkg/*` (config, logger, kafkaclient, model, service, controller, database, apiclient, appcontext).
- Controllers and services take concrete types (`*gorm.DB`, `*kafka.Writer`). No interfaces for “store” or “message writer,” so testing and swapping implementations require mocks at the concrete type level.
- `model` is a single large namespace (beacons, parser, trackers, gateways, zones, settings, etc.); no split by bounded context (e.g. beacon vs parser vs location).

---

## 3. Proposed Directory and Package Layout

Goal: keep `cmd/<service>/main.go` as a **thin composition layer** that only wires config, infra, and “app” components, and runs the process. All reusable logic and “where things live” should be clear from the directory structure.

### 3.1 Recommended Tree

```text
internal/
├── pkg/
│ ├── config/ # Keep; optional: split LoadServer/LoadLocation into configs subpackage or env schema
│ ├── logger/ # Keep
│ │
│ ├── domain/ # NEW: shared domain types and interfaces (no infra)
│ │ ├── beacon.go # Beacon, BeaconEvent, BeaconMetric, BeaconAdvertisement, BeaconsList, etc.
│ │ ├── parser.go # Config, KafkaParser, BeaconParser, ParserRegistry (or keep registry in service)
│ │ ├── location.go # HTTPLocation, location scoring constants
│ │ ├── trackers.go # Tracker, ApiUpdate, Alert, etc.
│ │ └── types.go # RawReading, Settings, and other shared DTOs
│ │
│ ├── store/ # NEW: in-memory / app state (optional rename of appcontext)
│ │ └── appstate.go # AppState (move from common/appcontext), same API
│ │
│ ├── messaging/ # NEW: Kafka (and optionally MQTT) behind interfaces
│ │ ├── kafka.go # Manager, Consume, Writer/Reader interfaces, implementation
│ │ └── interfaces.go # MessageWriter, MessageReader for tests
│ │
│ ├── db/ # Rename from database; single place for GORM
│ │ ├── postgres.go # Connect(cfg) (*gorm.DB, error)
│ │ └── models.go # GORM model structs only (Tracker, Gateway, Zone, etc.)
│ │
│ ├── client/ # Rename from apiclient; external HTTP (auth, infer, etc.)
│ │ ├── auth.go
│ │ ├── data.go
│ │ └── updatedb.go
│ │
│ ├── api/ # NEW: HTTP surface for server only
│ │ ├── handler/ # Handlers (move from controller); receive a Server or deps struct
│ │ │ ├── gateways.go
│ │ │ ├── zones.go
│ │ │ ├── trackers.go
│ │ │ ├── trackerzones.go
│ │ │ ├── parser.go
│ │ │ ├── settings.go
│ │ │ ├── tracks.go
│ │ │ └── health.go # /health, /ready
│ │ ├── middleware/ # CORS, logging, recovery, request ID
│ │ └── response/ # JSON success/error helpers
│ │
│ ├── service/ # Keep; make depend on interfaces
│ │ ├── beacon.go # LocationToBeaconService (depends on DB + Writer interfaces)
│ │ ├── parser.go
│ │ └── location.go # NEW: Filter algorithm, AssignBeaconToList (from cmd/location)
│ │
│ ├── location/ # NEW: location service internals
│ │ ├── filter.go # getLikelyLocations logic (score, confidence, write)
│ │ ├── assign.go # assignBeaconToList
│ │ └── inference.go # Interface for “get AI position”; adapter over client
│ │
│ ├── bridge/ # NEW: bridge-specific processing
│ │ ├── mqtt.go # MQTT client options, connect, subscribe (thin wrapper)
│ │ └── handler.go # MQTT message → Kafka (mqtthandler logic)
│ │
│ └── decoder/ # NEW: decoder-specific processing (or under service/)
│ │ ├── process.go # ProcessIncoming, DecodeBeacon (from cmd/decoder)
│ │ └── registry.go # Optional: wrap ParserRegistry with add/delete/update
│ │
├── app/ # NEW (optional): per-service composition / “application” layer
│ ├── server/
│ │ ├── app.go # ServerApp: config, db, kafka, router, event loop, shutdown
│ │ ├── routes.go # Register all routes with deps
│ │ └── events.go # RunEventLoop(ctx): location, alertbeacons, ticker
│ ├── location/
│ │ ├── app.go # LocationApp: config, kafka, store, filter/inference, run loop
│ │ └── loop.go # Run(ctx): ticker + channels
│ ├── bridge/
│ │ ├── app.go # BridgeApp: config, kafka, mqtt, store, run loop
│ │ └── loop.go
│ └── decoder/
│ ├── app.go # DecoderApp: config, kafka, registry, run loop
│ └── loop.go
```

You can adopt this incrementally: e.g. first add `internal/app/server` and move event loop + route registration there, then do the same for location/bridge/decoder.

### 3.2 What Each `cmd/<service>/main.go` Becomes

- **cmd/server/main.go:**
Load config (or exit on error), create logger, call `serverapp.New(cfg, logger)` (or bootstrap), then `app.Run(ctx)` and `app.Shutdown()`. No DB seed or parser sync in main—move those into `ServerApp` constructor or a `ServerApp.Init(ctx)`.

- **cmd/location/main.go:**
Load config, create logger, create AppState, call `locationapp.New(cfg, logger, appState)` (and optionally Kafka manager from bootstrap), then `app.Run(ctx)` and `app.Shutdown()`.

- **cmd/bridge/main.go:**
Same idea: bootstrap, then `bridgeapp.New(...)`, `Run(ctx)`, `Shutdown()`.

- **cmd/decoder/main.go:**
Bootstrap, then `decoderapp.New(...)` with parser registry, `Run(ctx)`, `Shutdown()`.

So each `main.go` is on the order of 20–40 lines: config + logger + optional bootstrap, build app, run, shutdown.

---

## 4. Refactoring Steps (Concrete)

### Phase 1: Extract bootstrap and shrink main (high impact, low risk)

1. **Add `internal/pkg/bootstrap` (or `runtime`):**
- `Bootstrap(ctx) (cfg *config.Config, log *slog.Logger, kafka *kafkaclient.KafkaManager, cleanup func())` for a given service type (or one function per service that returns what that service needs).
- Use it from all four `main.go` so that “create logger + kafka + context” is one place.
2. **Move shutdown sequence into a single place:** e.g. `Shutdown(ctx, kafkaManager, cleanup)` so each main just calls it after breaking the loop.

After this, each `main` is: load config → bootstrap → build Kafka/channels (or get from app) → create “App” (see Phase 2) → run loop → shutdown.

### Phase 2: Move event loops and “server wiring” into `internal/app`

3. **Server**
- Add `internal/app/server`: `ServerApp` struct holding cfg, db, kafkaManager, channels, router, server, wg.
- Move config load + DB connect + parser sync + Kafka reader setup into `NewServerApp(cfg, logger)` or `NewServerApp(...).Init(ctx)`.
- Move route registration into `RegisterRoutes(app *ServerApp)` (or `app.Routes()` that returns `http.Handler`).
- Move the event loop (select over chLoc, chEvents, ticker) into `ServerApp.RunEventLoop(ctx)`.
- `main` becomes: config → bootstrap → NewServerApp → Init → go ListenAndServe → RunEventLoop(ctx) → Shutdown.
4. **Location**
- Add `internal/app/location`: `LocationApp` with kafkaManager, appState, channels, filter algo, inference client.
- Move `getLikelyLocations` and `assignBeaconToList` into `internal/pkg/service/location.go` or `internal/pkg/location/filter.go` and `assign.go`.
- Move `getAI` behind an interface `LocationInferencer` in `internal/pkg/location`; implement with `client` (auth + infer).
- Event loop in `LocationApp.Run(ctx)`.
5. **Bridge**
- Add `internal/app/bridge`: `BridgeApp` with kafkaManager, mqtt client, appState, channels.
- Move `mqtthandler` and MQTT subscribe into `internal/pkg/bridge/handler.go` and `mqtt.go`; call from app.
- Event loop in `BridgeApp.Run(ctx)`.
6. **Decoder**
- Add `internal/app/decoder`: `DecoderApp` with kafkaManager, parser registry, channels.
- Move `processIncoming` and `decodeBeacon` into `internal/pkg/decoder/process.go`.
- Event loop in `DecoderApp.Run(ctx)`.

This removes “too much inside main” and gives a single place per service to add features (the `app` and the packages it uses).

### Phase 3: Interfaces and dependency injection

7. **Messaging**
- In `internal/pkg/messaging` (or keep `kafkaclient` and add interfaces there), define e.g. `MessageWriter` and `MessageReader` interfaces.
- Have `KafkaManager` (or a thin wrapper) implement them so handlers and services accept interfaces; tests can inject fakes.
8. **Store**
- Rename or keep `appcontext`; if you introduce `store`, have `AppState` implement e.g. `BeaconStore` / `SettingsStore` so location and bridge depend on interfaces.
9. **Server handlers**
- Instead of passing `db`, `writer`, `ctx` to each handler, introduce a `Server` or `HandlerEnv` struct that holds DB, writers, and optionally logger; handlers become methods or receive this struct. Then you can add health checks and middleware in one place.

### Phase 4: Domain and API clarity

10. **Domain**
- Create `internal/pkg/domain` and move shared types from `model` into domain subpackages (beacon, parser, location, trackers, etc.). Keep `model` as an alias or migrate imports gradually so that “core types” live under domain and “GORM models” stay under `db` if you split them.
11. **API**
- Move HTTP handlers from `controller` to `internal/pkg/api/handler`; add `api/response` for JSON and errors; add `api/middleware` for CORS, logging, recovery. Register routes in `app/server` using these handlers.

---

## 5. Summary Table

| Service | Current main responsibilities | After refactor: main does | New home for logic |
|----------|-----------------------------------|----------------------------------------|---------------------------------------------|
| server | Bootstrap, DB seed, routes, loop, shutdown | Config, bootstrap, NewServerApp, Run, Shutdown | `app/server` (event loop, routes), `api/handler`, `service` |
| location | Bootstrap, Kafka, loop, shutdown | Config, bootstrap, NewLocationApp, Run, Shutdown | `app/location`, `service/location` or `pkg/location` (filter, assign, inference) |
| bridge | Bootstrap, Kafka, MQTT, loop, shutdown | Config, bootstrap, NewBridgeApp, Run, Shutdown | `app/bridge`, `pkg/bridge` (mqtt, handler) |
| decoder | Bootstrap, Kafka, loop, shutdown | Config, bootstrap, NewDecoderApp, Run, Shutdown | `app/decoder`, `pkg/decoder` (process, decode) |

---

## 6. Benefits After Refactoring

- **Reusability:** Location algorithm, bridge MQTT handling, and decoder logic live in `internal/pkg` and can be tested and reused without running a full `main`.
- **Separation:** `cmd` only composes and runs; `internal/app` owns per-service lifecycle and event loops; `internal/pkg` holds domain, store, messaging, API, and services.
- **Maintainability:** Adding a new route or a new Kafka consumer is “add to ServerApp and register”; adding a new algorithm is “add to location package and call from LocationApp.”
- **Testability:** Event loops and handlers can be unit-tested with fake writers/stores; integration tests can build `*ServerApp` or `*DecoderApp` with test doubles.
- **Consistency:** One bootstrap and one shutdown pattern across all four services; same style of “App” struct and `Run(ctx)`.

You can implement Phase 1 and Phase 2 first (bootstrap + app with event loops and moved logic), then Phase 3 (interfaces) and Phase 4 (domain + API) as follow-ups.

+ 2
- 0
go.mod Просмотреть файл

@@ -25,10 +25,12 @@ require (
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect
github.com/klauspost/compress v1.15.9 // indirect
github.com/mattn/go-sqlite3 v1.14.22 // indirect
github.com/pierrec/lz4/v4 v4.1.15 // indirect
github.com/stretchr/testify v1.11.1 // indirect
golang.org/x/crypto v0.42.0 // indirect
golang.org/x/net v0.44.0 // indirect
golang.org/x/sync v0.17.0 // indirect
golang.org/x/text v0.29.0 // indirect
gorm.io/driver/sqlite v1.6.0 // indirect
)

+ 4
- 0
go.sum Просмотреть файл

@@ -27,6 +27,8 @@ github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
github.com/klauspost/compress v1.15.9 h1:wKRjX6JRtDdrE9qwa4b/Cip7ACOshUI4smpCQanqjSY=
github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU=
github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/pierrec/lz4/v4 v4.1.15 h1:MO0/ucJhngq7299dKLwIMtgTfbkoSPF6AoMYDd8Q4q0=
@@ -60,5 +62,7 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gorm.io/driver/postgres v1.6.0 h1:2dxzU8xJ+ivvqTRph34QX+WrRaJlmfyPqXmoGVjMBa4=
gorm.io/driver/postgres v1.6.0/go.mod h1:vUw0mrGgrTK+uPHEhAdV4sfFELrByKVGnaVRkXDhtWo=
gorm.io/driver/sqlite v1.6.0 h1:WHRRrIiulaPiPFmDcod6prc4l2VGVWHz80KspNsxSfQ=
gorm.io/driver/sqlite v1.6.0/go.mod h1:AO9V1qIQddBESngQUKWL9yoH93HIeA1X6V633rBwyT8=
gorm.io/gorm v1.31.1 h1:7CA8FTFz/gRfgqgpeKIBcervUn3xSyPUmr6B2WXJ7kg=
gorm.io/gorm v1.31.1/go.mod h1:XyQVbO2k6YkOis7C2437jSit3SsDK72s7n7rsSHd+Gs=

+ 0
- 21
internal/README.md Просмотреть файл

@@ -1,21 +0,0 @@
# `/internal`

Private application and library code. This is the code you don't want others importing in their applications or libraries. Note that this layout pattern is enforced by the Go compiler itself. See the Go 1.4 [`release notes`](https://golang.org/doc/go1.4#internalpackages) for more details. Note that you are not limited to the top level `internal` directory. You can have more than one `internal` directory at any level of your project tree.

You can optionally add a bit of extra structure to your internal packages to separate your shared and non-shared internal code. It's not required (especially for smaller projects), but it's nice to have visual clues showing the intended package use. Your actual application code can go in the `/internal/app` directory (e.g., `/internal/app/myapp`) and the code shared by those apps in the `/internal/pkg` directory (e.g., `/internal/pkg/myprivlib`).

Examples:

* https://github.com/hashicorp/terraform/tree/main/internal
* https://github.com/influxdata/influxdb/tree/master/internal
* https://github.com/perkeep/perkeep/tree/master/internal
* https://github.com/jaegertracing/jaeger/tree/main/internal
* https://github.com/moby/moby/tree/master/internal
* https://github.com/satellity/satellity/tree/main/internal
* https://github.com/minio/minio/tree/master/internal

## `/internal/pkg`

Examples:

* https://github.com/hashicorp/waypoint/tree/main/internal/pkg

+ 0
- 0
internal/app/_your_app_/.keep Просмотреть файл


+ 122
- 0
internal/app/bridge/app.go Просмотреть файл

@@ -0,0 +1,122 @@
package bridge

import (
"context"
"encoding/json"
"log/slog"
"sync"

"github.com/AFASystems/presence/internal/pkg/bridge"
"github.com/AFASystems/presence/internal/pkg/common/appcontext"
"github.com/AFASystems/presence/internal/pkg/config"
"github.com/AFASystems/presence/internal/pkg/kafkaclient"
"github.com/AFASystems/presence/internal/pkg/logger"
"github.com/AFASystems/presence/internal/pkg/model"
mqtt "github.com/eclipse/paho.mqtt.golang"
)

// BridgeApp holds dependencies for the bridge service (MQTT <-> Kafka).
// New wires the fields; Run drives the event loop; Shutdown releases resources.
type BridgeApp struct {
	Cfg          *config.Config            // service configuration (Kafka URL, MQTT connection details)
	KafkaManager *kafkaclient.KafkaManager // readers: apibeacons, alert, mqtt; writer: rawbeacons
	AppState     *appcontext.AppState      // in-memory MAC -> ID beacon lookup shared with the MQTT handler
	MQTT         *bridge.MQTTClient        // connected MQTT client; publishes to /alerts and /trackers
	ChApi        chan model.ApiUpdate      // beacon lookup add/remove commands consumed from "apibeacons"
	ChAlert      chan model.Alert          // alerts consumed from "alert", forwarded to MQTT /alerts
	ChMqtt       chan []model.Tracker      // tracker lists consumed from "mqtt", forwarded to MQTT /trackers
	Cleanup      func()                    // logger teardown returned by logger.CreateLogger; run in Shutdown
	wg           sync.WaitGroup            // tracks the three Kafka consumer goroutines started in Run
}

// New creates a BridgeApp with Kafka readers (apibeacons, alert, mqtt), writer (rawbeacons), and MQTT client.
// The MQTT message callback forwards every inbound message to the rawbeacons Kafka writer.
func New(cfg *config.Config) (*BridgeApp, error) {
	srvLogger, cleanup := logger.CreateLogger("bridge.log")
	slog.SetDefault(srvLogger)

	appState := appcontext.NewAppState()
	kafkaManager := kafkaclient.InitKafkaManager()

	readers := []string{"apibeacons", "alert", "mqtt"}
	writers := []string{"rawbeacons"}
	kafkaManager.PopulateKafkaManager(cfg.KafkaURL, "bridge", readers)
	kafkaManager.PopulateKafkaManager(cfg.KafkaURL, "", writers)
	slog.Info("bridge service initialized", "readers", readers, "writers", writers)

	rawWriter := kafkaManager.GetWriter("rawbeacons")
	mqttClient, err := bridge.NewMQTTClient(cfg, func(m mqtt.Message) {
		bridge.HandleMQTTMessage(m.Topic(), m.Payload(), appState, rawWriter)
	})
	if err != nil {
		// MQTT is mandatory for the bridge; release the logger before bailing out.
		cleanup()
		return nil, err
	}
	mqttClient.Subscribe()

	app := &BridgeApp{
		Cfg:          cfg,
		KafkaManager: kafkaManager,
		AppState:     appState,
		MQTT:         mqttClient,
		ChApi:        make(chan model.ApiUpdate, config.SMALL_CHANNEL_SIZE),
		ChAlert:      make(chan model.Alert, config.SMALL_CHANNEL_SIZE),
		ChMqtt:       make(chan []model.Tracker, config.SMALL_CHANNEL_SIZE),
		Cleanup:      cleanup,
	}
	return app, nil
}

// Run starts Kafka consumers and the event loop until ctx is cancelled.
// API updates mutate the in-memory beacon lookup; alert and tracker messages
// are JSON-marshaled and published to MQTT (/alerts, /trackers).
func (a *BridgeApp) Run(ctx context.Context) {
	a.wg.Add(3)
	go kafkaclient.Consume(a.KafkaManager.GetReader("apibeacons"), a.ChApi, ctx, &a.wg)
	go kafkaclient.Consume(a.KafkaManager.GetReader("alert"), a.ChAlert, ctx, &a.wg)
	go kafkaclient.Consume(a.KafkaManager.GetReader("mqtt"), a.ChMqtt, ctx, &a.wg)

	for {
		select {
		case <-ctx.Done():
			return
		case msg := <-a.ChApi:
			a.handleAPIUpdate(msg)
		case msg := <-a.ChAlert:
			a.publishJSON("/alerts", "marshaling alert", msg)
		case msg := <-a.ChMqtt:
			a.publishJSON("/trackers", "marshaling trackers", msg)
		}
	}
}

// handleAPIUpdate applies a beacon lookup change: POST adds a MAC -> ID entry,
// DELETE removes one entry (or clears the whole lookup when MAC == "all").
func (a *BridgeApp) handleAPIUpdate(msg model.ApiUpdate) {
	switch msg.Method {
	case "POST":
		a.AppState.AddBeaconToLookup(msg.MAC, msg.ID)
		slog.Info("beacon added to lookup", "id", msg.ID)
	case "DELETE":
		if msg.MAC == "all" {
			a.AppState.CleanLookup()
			slog.Info("lookup cleared")
			return
		}
		a.AppState.RemoveBeaconFromLookup(msg.MAC)
		slog.Info("beacon removed from lookup", "mac", msg.MAC)
	}
}

// publishJSON marshals v and publishes it retained at QoS 0 to the given MQTT
// topic; on marshal failure it logs errMsg and drops the message. Extracted
// because the alert and tracker branches were identical except for strings.
func (a *BridgeApp) publishJSON(topic, errMsg string, v any) {
	p, err := json.Marshal(v)
	if err != nil {
		slog.Error(errMsg, "err", err)
		return
	}
	a.MQTT.Client.Publish(topic, 0, true, p)
}

// Shutdown disconnects MQTT, waits for consumers, and cleans up.
// Safe to call with a nil MQTT client or nil Cleanup.
func (a *BridgeApp) Shutdown() {
	// Block until all three Kafka consumer goroutines have exited.
	a.wg.Wait()

	if c := a.MQTT; c != nil {
		c.Disconnect()
	}

	a.KafkaManager.CleanKafkaReaders()
	a.KafkaManager.CleanKafkaWriters()

	if done := a.Cleanup; done != nil {
		done()
	}
	slog.Info("bridge service shutdown complete")
}

+ 91
- 0
internal/app/decoder/app.go Просмотреть файл

@@ -0,0 +1,91 @@
package decoder

import (
"context"
"log/slog"
"sync"

"github.com/AFASystems/presence/internal/pkg/common/appcontext"
"github.com/AFASystems/presence/internal/pkg/config"
"github.com/AFASystems/presence/internal/pkg/decoder"
"github.com/AFASystems/presence/internal/pkg/kafkaclient"
"github.com/AFASystems/presence/internal/pkg/logger"
"github.com/AFASystems/presence/internal/pkg/model"
)

// DecoderApp holds dependencies for the decoder service.
// New wires the fields; Run drives the event loop; Shutdown releases resources.
type DecoderApp struct {
	Cfg            *config.Config                 // service configuration (Kafka URL)
	KafkaManager   *kafkaclient.KafkaManager      // readers: rawbeacons, parser; writer: alertbeacons
	AppState       *appcontext.AppState           // shared in-memory state passed to decoder.ProcessIncoming
	ParserRegistry *model.ParserRegistry          // name -> BeaconParser registry, mutated by parser-topic messages
	ChRaw          chan model.BeaconAdvertisement // raw advertisements consumed from "rawbeacons"
	ChParser       chan model.KafkaParser         // parser config add/update/delete commands from "parser"
	Cleanup        func()                         // logger teardown returned by logger.CreateLogger; run in Shutdown
	wg             sync.WaitGroup                 // tracks the two Kafka consumer goroutines started in Run
}

// New creates a DecoderApp with Kafka readers (rawbeacons, parser) and writer (alertbeacons).
// The parser registry starts empty; entries arrive via the "parser" topic.
func New(cfg *config.Config) (*DecoderApp, error) {
	srvLogger, cleanup := logger.CreateLogger("decoder.log")
	slog.SetDefault(srvLogger)

	kafkaManager := kafkaclient.InitKafkaManager()
	readers := []string{"rawbeacons", "parser"}
	writers := []string{"alertbeacons"}
	kafkaManager.PopulateKafkaManager(cfg.KafkaURL, "decoder", readers)
	kafkaManager.PopulateKafkaManager(cfg.KafkaURL, "", writers)
	slog.Info("decoder service initialized", "readers", readers, "writers", writers)

	app := &DecoderApp{
		Cfg:          cfg,
		KafkaManager: kafkaManager,
		AppState:     appcontext.NewAppState(),
		ParserRegistry: &model.ParserRegistry{
			ParserList: make(map[string]model.BeaconParser),
		},
		ChRaw:    make(chan model.BeaconAdvertisement, config.LARGE_CHANNEL_SIZE),
		ChParser: make(chan model.KafkaParser, config.SMALL_CHANNEL_SIZE),
		Cleanup:  cleanup,
	}
	return app, nil
}

// Run starts Kafka consumers and the event loop until ctx is cancelled.
// Raw advertisements are decoded through the parser registry; messages on the
// parser channel mutate the registry (add/update register, delete unregisters).
func (a *DecoderApp) Run(ctx context.Context) {
	a.wg.Add(2)
	go kafkaclient.Consume(a.KafkaManager.GetReader("rawbeacons"), a.ChRaw, ctx, &a.wg)
	go kafkaclient.Consume(a.KafkaManager.GetReader("parser"), a.ChParser, ctx, &a.wg)

	for {
		select {
		case <-ctx.Done():
			return
		case msg := <-a.ChRaw:
			decoder.ProcessIncoming(msg, a.AppState, a.KafkaManager.GetWriter("alertbeacons"), a.ParserRegistry)
		case msg := <-a.ChParser:
			switch msg.ID {
			case "add", "update":
				// Both commands are an upsert into the registry; the two
				// previously separate cases had identical bodies.
				a.ParserRegistry.Register(msg.Config.Name, msg.Config)
			case "delete":
				a.ParserRegistry.Unregister(msg.Name)
			}
		}
	}
}

// Shutdown waits for consumers and cleans up.
// Safe to call with a nil Cleanup function.
func (a *DecoderApp) Shutdown() {
	// Block until both Kafka consumer goroutines have exited.
	a.wg.Wait()

	a.KafkaManager.CleanKafkaReaders()
	a.KafkaManager.CleanKafkaWriters()

	if done := a.Cleanup; done != nil {
		done()
	}
	slog.Info("decoder service shutdown complete")
}

+ 100
- 0
internal/app/location/app.go Просмотреть файл

@@ -0,0 +1,100 @@
package location

import (
"context"
"fmt"
"log/slog"
"sync"
"time"

"github.com/AFASystems/presence/internal/pkg/common/appcontext"
"github.com/AFASystems/presence/internal/pkg/config"
"github.com/AFASystems/presence/internal/pkg/kafkaclient"
"github.com/AFASystems/presence/internal/pkg/logger"
pkglocation "github.com/AFASystems/presence/internal/pkg/location"
"github.com/AFASystems/presence/internal/pkg/model"
)

// LocationApp holds dependencies for the location service.
// New wires the fields; Run drives the event loop; Shutdown releases resources.
type LocationApp struct {
	Cfg          *config.Config                 // service configuration (Kafka URL, TLS options)
	KafkaManager *kafkaclient.KafkaManager      // readers: rawbeacons, settings; writer: locevents
	AppState     *appcontext.AppState           // beacon metrics and current settings, read on every tick
	Inferencer   pkglocation.Inferencer         // AI position inference used when CurrentAlgorithm == "ai"
	ChRaw        chan model.BeaconAdvertisement // raw advertisements consumed from "rawbeacons"
	ChSettings   chan map[string]any            // settings updates consumed from "settings"
	Cleanup      func()                         // logger teardown returned by logger.CreateLogger; run in Shutdown
	wg           sync.WaitGroup                 // tracks the two Kafka consumer goroutines started in Run
}

// New creates a LocationApp with Kafka readers (rawbeacons, settings) and writer (locevents).
// The default inferencer honors cfg.TLSInsecureSkipVerify for the AI endpoint.
func New(cfg *config.Config) (*LocationApp, error) {
	srvLogger, cleanup := logger.CreateLogger("location.log")
	slog.SetDefault(srvLogger)

	kafkaManager := kafkaclient.InitKafkaManager()
	readers := []string{"rawbeacons", "settings"}
	writers := []string{"locevents"}
	kafkaManager.PopulateKafkaManager(cfg.KafkaURL, "location", readers)
	kafkaManager.PopulateKafkaManager(cfg.KafkaURL, "", writers)
	slog.Info("location service initialized", "readers", readers, "writers", writers)

	app := &LocationApp{
		Cfg:          cfg,
		KafkaManager: kafkaManager,
		AppState:     appcontext.NewAppState(),
		Inferencer:   pkglocation.NewDefaultInferencer(cfg.TLSInsecureSkipVerify),
		ChRaw:        make(chan model.BeaconAdvertisement, config.LARGE_CHANNEL_SIZE),
		ChSettings:   make(chan map[string]any, config.SMALL_CHANNEL_SIZE),
		Cleanup:      cleanup,
	}
	return app, nil
}

// Run starts consumers and the event loop until ctx is cancelled.
// On each tick the configured algorithm runs ("filter" scores beacons locally,
// "ai" calls the inference endpoint); raw beacons and settings updates are
// applied as they arrive.
func (a *LocationApp) Run(ctx context.Context) {
	a.wg.Add(2)
	go kafkaclient.Consume(a.KafkaManager.GetReader("rawbeacons"), a.ChRaw, ctx, &a.wg)
	go kafkaclient.Consume(a.KafkaManager.GetReader("settings"), a.ChSettings, ctx, &a.wg)

	locTicker := time.NewTicker(config.SMALL_TICKER_INTERVAL)
	defer locTicker.Stop()

	for {
		select {
		case <-ctx.Done():
			return
		case <-locTicker.C:
			settings := a.AppState.GetSettings()
			slog.Info("location tick", "settings", fmt.Sprintf("%+v", settings))
			switch settings.CurrentAlgorithm {
			case "filter":
				pkglocation.GetLikelyLocations(a.AppState, a.KafkaManager.GetWriter("locevents"))
			case "ai":
				inferred, err := a.Inferencer.Infer(ctx, a.Cfg)
				if err != nil {
					slog.Error("AI inference", "err", err)
					continue
				}
				slog.Info("AI algorithm", "count", inferred.Count, "items", len(inferred.Items))
			default:
				// Previously an unrecognized algorithm was silently skipped every
				// tick; surface the misconfiguration. An empty value (settings not
				// yet received) stays quiet to avoid startup noise.
				if settings.CurrentAlgorithm != "" {
					slog.Warn("unknown location algorithm", "algorithm", settings.CurrentAlgorithm)
				}
			}
		case msg := <-a.ChRaw:
			pkglocation.AssignBeaconToList(msg, a.AppState)
		case msg := <-a.ChSettings:
			slog.Info("settings update", "msg", msg)
			a.AppState.UpdateSettings(msg)
		}
	}
}

// Shutdown waits for consumers and cleans up Kafka and logger.
// Safe to call with a nil Cleanup function.
func (a *LocationApp) Shutdown() {
	// Block until both Kafka consumer goroutines have exited.
	a.wg.Wait()

	a.KafkaManager.CleanKafkaReaders()
	a.KafkaManager.CleanKafkaWriters()

	if done := a.Cleanup; done != nil {
		done()
	}
	slog.Info("location service shutdown complete")
}

+ 145
- 0
internal/app/server/app.go Просмотреть файл

@@ -0,0 +1,145 @@
package server

import (
"context"
"encoding/json"
"fmt"
"io"
"log/slog"
"net/http"
"os"
"sync"

"github.com/AFASystems/presence/internal/pkg/apiclient"
"github.com/AFASystems/presence/internal/pkg/common/appcontext"
"github.com/AFASystems/presence/internal/pkg/config"
"github.com/AFASystems/presence/internal/pkg/database"
"github.com/AFASystems/presence/internal/pkg/kafkaclient"
"github.com/AFASystems/presence/internal/pkg/logger"
"github.com/AFASystems/presence/internal/pkg/model"
"github.com/AFASystems/presence/internal/pkg/service"
"gorm.io/gorm"
)

// ServerApp holds dependencies and state for the server service.
// It owns the HTTP server, the Kafka readers/writers, and the channels
// its event loop consumes. Lifecycle: New -> Init(ctx) -> Run(ctx) ->
// Shutdown().
type ServerApp struct {
	Cfg          *config.Config             // service configuration (DB, Kafka, HTTP addresses)
	DB           *gorm.DB                   // database handle opened in New via database.Connect
	KafkaManager *kafkaclient.KafkaManager  // topic-keyed Kafka readers and writers
	AppState     *appcontext.AppState       // shared in-memory application state
	ChLoc        chan model.HTTPLocation    // location events consumed from "locevents"
	ChEvents     chan model.BeaconEvent     // beacon events consumed from "alertbeacons"
	ctx          context.Context            // NOTE(review): storing a context in a struct is discouraged; kept so route handlers can reach it — consider per-request contexts
	Server       *http.Server               // HTTP server built in Init
	Cleanup      func()                     // logger cleanup hook returned by logger.CreateLogger
	wg           sync.WaitGroup             // tracks the Kafka consumer goroutines started in Init
}

// New wires up a ServerApp: installs the service logger, connects to
// the database, creates the shared app state, and provisions the Kafka
// writers this service publishes to. Callers must follow with
// Init(ctx), Run(ctx), and Shutdown().
func New(cfg *config.Config) (*ServerApp, error) {
	log, cleanup := logger.CreateLogger("server.log")
	slog.SetDefault(log)

	db, err := database.Connect(cfg)
	if err != nil {
		// Release the logger before bailing out; nothing else was acquired.
		cleanup()
		return nil, fmt.Errorf("database: %w", err)
	}

	km := kafkaclient.InitKafkaManager()
	topics := []string{"apibeacons", "alert", "mqtt", "settings", "parser"}
	km.PopulateKafkaManager(cfg.KafkaURL, "", topics)
	slog.Info("Kafka writers initialized", "topics", topics)

	return &ServerApp{
		Cfg:          cfg,
		DB:           db,
		KafkaManager: km,
		AppState:     appcontext.NewAppState(),
		Cleanup:      cleanup,
	}, nil
}

// Init prepares the service for Run: it seeds the database from the
// JSON config file, pushes every parser config to Kafka, refreshes the
// DB from the upstream API, provisions the Kafka readers/consumers, and
// builds the HTTP server. The passed ctx is retained for route handlers.
//
// Fixes over the original: DB errors from Create/Find are no longer
// silently discarded, and the seed loop indexes the slice instead of
// taking the address of the loop variable.
func (a *ServerApp) Init(ctx context.Context) error {
	a.ctx = ctx

	configFile, err := os.Open(a.Cfg.ConfigPath)
	if err != nil {
		return fmt.Errorf("config file: %w", err)
	}
	defer configFile.Close()

	b, err := io.ReadAll(configFile)
	if err != nil {
		return fmt.Errorf("read config: %w", err)
	}

	var configs []model.Config
	if err := json.Unmarshal(b, &configs); err != nil {
		return fmt.Errorf("unmarshal config: %w", err)
	}

	// Seed the DB with file-provided configs. Errors (e.g. duplicates from
	// a previous run) are logged rather than fatal so startup proceeds.
	for i := range configs {
		if err := a.DB.Create(&configs[i]).Error; err != nil {
			slog.Error("seeding parser config", "err", err, "name", configs[i].Name)
		}
	}

	// Reload the full set from the DB and announce each config on Kafka.
	if err := a.DB.Find(&configs).Error; err != nil {
		slog.Error("loading parser configs", "err", err)
	}
	for _, c := range configs {
		kp := model.KafkaParser{ID: "add", Config: c}
		if err := service.SendParserConfig(kp, a.KafkaManager.GetWriter("parser"), ctx); err != nil {
			slog.Error("sending parser config to kafka", "err", err, "name", c.Name)
		}
	}

	// Best-effort sync with the upstream API; failure is non-fatal.
	if err := apiclient.UpdateDB(a.DB, ctx, a.Cfg, a.KafkaManager.GetWriter("apibeacons"), a.AppState); err != nil {
		slog.Error("UpdateDB", "err", err)
	}

	readerTopics := []string{"locevents", "alertbeacons"}
	a.KafkaManager.PopulateKafkaManager(a.Cfg.KafkaURL, "server", readerTopics)
	slog.Info("Kafka readers initialized", "topics", readerTopics)

	a.ChLoc = make(chan model.HTTPLocation, config.SMALL_CHANNEL_SIZE)
	a.ChEvents = make(chan model.BeaconEvent, config.MEDIUM_CHANNEL_SIZE)

	a.wg.Add(2)
	go kafkaclient.Consume(a.KafkaManager.GetReader("locevents"), a.ChLoc, ctx, &a.wg)
	go kafkaclient.Consume(a.KafkaManager.GetReader("alertbeacons"), a.ChEvents, ctx, &a.wg)

	a.Server = &http.Server{
		Addr:    a.Cfg.HTTPAddr,
		Handler: a.RegisterRoutes(),
	}
	return nil
}

// Run launches the HTTP server in the background and then blocks in the
// event loop until ctx is cancelled. A graceful Shutdown() produces
// http.ErrServerClosed, which is deliberately not logged as an error.
func (a *ServerApp) Run(ctx context.Context) {
	go func() {
		err := a.Server.ListenAndServe()
		if err != nil && err != http.ErrServerClosed {
			slog.Error("HTTP server", "err", err)
		}
	}()

	RunEventLoop(ctx, a)
}

// Shutdown gracefully stops the HTTP server, waits for the Kafka
// consumer goroutines to drain, and then releases Kafka resources and
// the logger cleanup hook, in that order.
func (a *ServerApp) Shutdown() {
	if srv := a.Server; srv != nil {
		if err := srv.Shutdown(context.Background()); err != nil {
			slog.Error("server shutdown", "err", err)
		}
		slog.Info("HTTP server stopped")
	}

	a.wg.Wait()
	slog.Info("Kafka consumers stopped")

	a.KafkaManager.CleanKafkaReaders()
	a.KafkaManager.CleanKafkaWriters()

	if cleanup := a.Cleanup; cleanup != nil {
		cleanup()
	}
	slog.Info("server shutdown complete")
}

+ 51
- 0
internal/app/server/events.go Просмотреть файл

@@ -0,0 +1,51 @@
package server

import (
"context"
"encoding/json"
"log/slog"
"time"

"github.com/AFASystems/presence/internal/pkg/config"
"github.com/AFASystems/presence/internal/pkg/model"
"github.com/AFASystems/presence/internal/pkg/service"
"github.com/segmentio/kafka-go"
)

// RunEventLoop drives the server's background work until ctx is
// cancelled: location events are forwarded to the beacon service, alert
// events update the tracker's battery/temperature in the DB, and a
// periodic ticker publishes the full tracker list to the "mqtt" topic.
func RunEventLoop(ctx context.Context, a *ServerApp) {
	publish := time.NewTicker(config.MEDIUM_TICKER_INTERVAL)
	defer publish.Stop()

	for {
		select {
		case <-ctx.Done():
			return

		case loc := <-a.ChLoc:
			service.LocationToBeaconService(loc, a.DB, a.KafkaManager.GetWriter("alert"), ctx)

		case ev := <-a.ChEvents:
			slog.Info("decoder event", "event", ev)
			// Only persist events for trackers we already know about.
			if err := a.DB.First(&model.Tracker{}, "id = ?", ev.ID).Error; err != nil {
				slog.Error("decoder event for untracked beacon", "id", ev.ID)
				continue
			}
			update := model.Tracker{ID: ev.ID, Battery: ev.Battery, Temperature: ev.Temperature}
			if err := a.DB.Updates(&update).Error; err != nil {
				slog.Error("saving decoder event for beacon", "id", ev.ID, "err", err)
			}

		case <-publish.C:
			var trackers []model.Tracker
			a.DB.Find(&trackers)
			payload, err := json.Marshal(trackers)
			if err != nil {
				slog.Error("marshaling trackers list", "err", err)
				continue
			}
			if err := a.KafkaManager.GetWriter("mqtt").WriteMessages(ctx, kafka.Message{Value: payload}); err != nil {
				slog.Error("writing trackers to mqtt topic", "err", err)
			}
		}
	}
}

+ 59
- 0
internal/app/server/routes.go Просмотреть файл

@@ -0,0 +1,59 @@
package server

import (
"net/http"

"github.com/AFASystems/presence/internal/pkg/api/handler"
"github.com/AFASystems/presence/internal/pkg/api/middleware"
"github.com/AFASystems/presence/internal/pkg/controller"
"github.com/gorilla/mux"
)

// RegisterRoutes builds the HTTP router for the server service and
// wraps it in the middleware chain Recovery -> Logging -> RequestID ->
// CORS (outermost to innermost as executed). Handlers close over the
// app's DB, Kafka writers, and stored ctx.
//
// NOTE(review): handlers are given a.ctx (the Init context) for Kafka
// writes rather than the per-request context — confirm that is intended.
func (a *ServerApp) RegisterRoutes() http.Handler {
	r := mux.NewRouter()

	// Liveness and readiness probes.
	r.HandleFunc("/health", handler.Health).Methods("GET")
	r.HandleFunc("/ready", handler.Ready(a.DB)).Methods("GET")

	// Gateway CRUD.
	r.HandleFunc("/reslevis/getGateways", controller.GatewayListController(a.DB)).Methods("GET")
	r.HandleFunc("/reslevis/postGateway", controller.GatewayAddController(a.DB)).Methods("POST")
	r.HandleFunc("/reslevis/removeGateway/{id}", controller.GatewayDeleteController(a.DB)).Methods("DELETE")
	r.HandleFunc("/reslevis/updateGateway/{id}", controller.GatewayUpdateController(a.DB)).Methods("PUT")

	// Zone CRUD (note: update takes no {id} path parameter).
	r.HandleFunc("/reslevis/getZones", controller.ZoneListController(a.DB)).Methods("GET")
	r.HandleFunc("/reslevis/postZone", controller.ZoneAddController(a.DB)).Methods("POST")
	r.HandleFunc("/reslevis/removeZone/{id}", controller.ZoneDeleteController(a.DB)).Methods("DELETE")
	r.HandleFunc("/reslevis/updateZone", controller.ZoneUpdateController(a.DB)).Methods("PUT")

	// Tracker-zone assignment CRUD.
	r.HandleFunc("/reslevis/getTrackerZones", controller.TrackerZoneListController(a.DB)).Methods("GET")
	r.HandleFunc("/reslevis/postTrackerZone", controller.TrackerZoneAddController(a.DB)).Methods("POST")
	r.HandleFunc("/reslevis/removeTrackerZone/{id}", controller.TrackerZoneDeleteController(a.DB)).Methods("DELETE")
	r.HandleFunc("/reslevis/updateTrackerZone", controller.TrackerZoneUpdateController(a.DB)).Methods("PUT")

	// Tracker CRUD; add/delete also publish to the "apibeacons" topic.
	r.HandleFunc("/reslevis/getTrackers", controller.TrackerList(a.DB)).Methods("GET")
	r.HandleFunc("/reslevis/postTracker", controller.TrackerAdd(a.DB, a.KafkaManager.GetWriter("apibeacons"), a.ctx)).Methods("POST")
	r.HandleFunc("/reslevis/removeTracker/{id}", controller.TrackerDelete(a.DB, a.KafkaManager.GetWriter("apibeacons"), a.ctx)).Methods("DELETE")
	r.HandleFunc("/reslevis/updateTracker", controller.TrackerUpdate(a.DB)).Methods("PUT")

	// Parser configs; mutations also publish to the "parser" topic.
	r.HandleFunc("/configs/beacons", controller.ParserListController(a.DB)).Methods("GET")
	r.HandleFunc("/configs/beacons", controller.ParserAddController(a.DB, a.KafkaManager.GetWriter("parser"), a.ctx)).Methods("POST")
	r.HandleFunc("/configs/beacons/{id}", controller.ParserUpdateController(a.DB, a.KafkaManager.GetWriter("parser"), a.ctx)).Methods("PUT")
	r.HandleFunc("/configs/beacons/{id}", controller.ParserDeleteController(a.DB, a.KafkaManager.GetWriter("parser"), a.ctx)).Methods("DELETE")

	// Settings; updates also publish to the "settings" topic.
	r.HandleFunc("/reslevis/settings", controller.SettingsUpdateController(a.DB, a.KafkaManager.GetWriter("settings"), a.ctx)).Methods("PATCH")
	r.HandleFunc("/reslevis/settings", controller.SettingsListController(a.DB)).Methods("GET")

	// Historical tracks for a single tracker.
	r.HandleFunc("/reslevis/getTracks/{id}", controller.TracksListController(a.DB)).Methods("GET")

	// CORS(nil, nil, nil) applies the middleware's permissive defaults.
	chain := middleware.Recovery(middleware.Logging(middleware.RequestID(middleware.CORS(nil, nil, nil)(r))))
	return chain
}

+ 29
- 0
internal/pkg/api/handler/health.go Просмотреть файл

@@ -0,0 +1,29 @@
package handler

import (
"net/http"

"github.com/AFASystems/presence/internal/pkg/api/response"
"gorm.io/gorm"
)

// Health is a liveness probe: it unconditionally answers 200 with the
// JSON body {"status":"ok"}.
func Health(w http.ResponseWriter, r *http.Request) {
	body := map[string]string{"status": "ok"}
	response.JSON(w, http.StatusOK, body)
}

// Ready returns a readiness handler that verifies database
// connectivity: 200 with {"status":"ready"} when the connection pool is
// reachable, 503 with an error body otherwise.
//
// Fix over the original: the ping now uses PingContext with the
// request's context so a cancelled or timed-out probe does not block
// indefinitely on an unresponsive database.
func Ready(db *gorm.DB) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		sqlDB, err := db.DB()
		if err != nil {
			response.Error(w, http.StatusServiceUnavailable, "not_ready", "database not available")
			return
		}
		if err := sqlDB.PingContext(r.Context()); err != nil {
			response.Error(w, http.StatusServiceUnavailable, "not_ready", "database ping failed")
			return
		}
		response.JSON(w, http.StatusOK, map[string]string{"status": "ready"})
	}
}

+ 26
- 0
internal/pkg/api/middleware/cors.go Просмотреть файл

@@ -0,0 +1,26 @@
package middleware

import (
"net/http"

"github.com/gorilla/handlers"
)

// CORS returns middleware that applies CORS with the given allowed
// origins, headers, and methods. Any nil or empty argument falls back
// to a permissive default: all origins ("*"), a standard header set
// that includes the request-ID header, and all common HTTP methods.
// (The previous doc claimed empty origins were left untouched; the code
// has always defaulted them to "*".)
func CORS(origins, headers, methods []string) func(http.Handler) http.Handler {
	if len(origins) == 0 {
		origins = []string{"*"}
	}
	if len(headers) == 0 {
		headers = []string{"X-Requested-With", "Content-Type", "Authorization", RequestIDHeader}
	}
	if len(methods) == 0 {
		methods = []string{"GET", "HEAD", "POST", "PUT", "PATCH", "DELETE", "OPTIONS"}
	}
	return handlers.CORS(
		handlers.AllowedOrigins(origins),
		handlers.AllowedHeaders(headers),
		handlers.AllowedMethods(methods),
	)
}

+ 41
- 0
internal/pkg/api/middleware/logging.go Просмотреть файл

@@ -0,0 +1,41 @@
package middleware

import (
"log/slog"
"net/http"
"time"
)

// responseWriter wraps http.ResponseWriter to capture status and bytes written.
type responseWriter struct {
http.ResponseWriter
status int
bytes int
}

func (rw *responseWriter) WriteHeader(code int) {
rw.status = code
rw.ResponseWriter.WriteHeader(code)
}

func (rw *responseWriter) Write(b []byte) (int, error) {
n, err := rw.ResponseWriter.Write(b)
rw.bytes += n
return n, err
}

// Logging logs each request with method, path, status, duration, and bytes written.
func Logging(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
start := time.Now()
wrap := &responseWriter{ResponseWriter: w, status: http.StatusOK}
next.ServeHTTP(wrap, r)
slog.Info("request",
"method", r.Method,
"path", r.URL.Path,
"status", wrap.status,
"duration_ms", time.Since(start).Milliseconds(),
"bytes", wrap.bytes,
)
})
}

+ 22
- 0
internal/pkg/api/middleware/recovery.go Просмотреть файл

@@ -0,0 +1,22 @@
package middleware

import (
"log/slog"
"net/http"
"runtime/debug"
)

// Recovery recovers from panics, logs the stack, and returns 500.
func Recovery(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
defer func() {
if err := recover(); err != nil {
slog.Error("panic recovered", "err", err, "stack", string(debug.Stack()))
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(`{"error":"internal_error","message":"internal server error"}`))
}
}()
next.ServeHTTP(w, r)
})
}

+ 22
- 0
internal/pkg/api/middleware/requestid.go Просмотреть файл

@@ -0,0 +1,22 @@
package middleware

import (
"net/http"

"github.com/google/uuid"
)

// RequestIDHeader is the canonical header carrying a request's
// correlation ID.
const RequestIDHeader = "X-Request-ID"

// RequestID ensures every request carries an X-Request-ID: an incoming
// ID is preserved, otherwise a fresh UUID is generated. The ID is set
// on both the request headers (so downstream handlers can read it) and
// the response headers (so clients can correlate).
//
// Fix over the original: the generated ID was only written to the
// response, so handlers downstream of this middleware could never see
// it (the doc's claim of context propagation was never implemented).
func RequestID(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		id := r.Header.Get(RequestIDHeader)
		if id == "" {
			id = uuid.New().String()
		}
		r.Header.Set(RequestIDHeader, id)
		w.Header().Set(RequestIDHeader, id)
		next.ServeHTTP(w, r)
	})
}

+ 55
- 0
internal/pkg/api/response/response.go Просмотреть файл

@@ -0,0 +1,55 @@
package response

import (
"encoding/json"
"log/slog"
"net/http"
)

// ErrorBody is the standard JSON error response shape.
type ErrorBody struct {
Error string `json:"error"`
Message string `json:"message,omitempty"`
}

// JSON writes a JSON body with status code and Content-Type.
func JSON(w http.ResponseWriter, status int, v any) {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(status)
if v != nil {
_ = json.NewEncoder(w).Encode(v)
}
}

// OK writes 200 with optional JSON body.
func OK(w http.ResponseWriter, v any) {
if v == nil {
w.WriteHeader(http.StatusOK)
w.Write([]byte("ok"))
return
}
JSON(w, http.StatusOK, v)
}

// Error writes a JSON error response.
func Error(w http.ResponseWriter, status int, err string, message string) {
JSON(w, status, ErrorBody{Error: err, Message: message})
}

// BadRequest writes 400 with error message.
func BadRequest(w http.ResponseWriter, message string) {
Error(w, http.StatusBadRequest, "bad_request", message)
}

// InternalError writes 500 and logs the err.
func InternalError(w http.ResponseWriter, message string, logErr error) {
if logErr != nil {
slog.Error(message, "err", logErr)
}
Error(w, http.StatusInternalServerError, "internal_error", message)
}

// NotFound writes 404.
func NotFound(w http.ResponseWriter, message string) {
Error(w, http.StatusNotFound, "not_found", message)
}

+ 10
- 6
internal/pkg/apiclient/auth.go Просмотреть файл

@@ -3,6 +3,7 @@ package apiclient
import (
"context"
"encoding/json"
"fmt"
"net/http"
"net/url"
"strings"
@@ -17,14 +18,15 @@ type response struct {
func GetToken(ctx context.Context, cfg *config.Config, client *http.Client) (string, error) {
formData := url.Values{}
formData.Set("grant_type", "password")
formData.Set("client_id", "Fastapi")
formData.Set("client_secret", "wojuoB7Z5xhlPFrF2lIxJSSdVHCApEgC")
formData.Set("username", "core")
formData.Set("password", "C0r3_us3r_Cr3d3nt14ls")
formData.Set("audience", "Fastapi")
formData.Set("client_id", cfg.HTTPClientID)
formData.Set("client_secret", cfg.ClientSecret)
formData.Set("username", cfg.HTTPUsername)
formData.Set("password", cfg.HTTPPassword)
formData.Set("audience", cfg.HTTPAudience)

req, err := http.NewRequest("POST", "https://10.251.0.30:10002/realms/API.Server.local/protocol/openid-connect/token", strings.NewReader(formData.Encode()))
req, err := http.NewRequest("POST", fmt.Sprintf("%s/realms/API.Server.local/protocol/openid-connect/token", cfg.APIAuthURL), strings.NewReader(formData.Encode()))
if err != nil {
fmt.Println("error", err)
return "", err
}
req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
@@ -32,12 +34,14 @@ func GetToken(ctx context.Context, cfg *config.Config, client *http.Client) (str
req = req.WithContext(ctx)
res, err := client.Do(req)
if err != nil {
fmt.Println("error", err)
return "", err
}

var j response

if err := json.NewDecoder(res.Body).Decode(&j); err != nil {
fmt.Println("error", err)
return "", err
}



+ 26
- 29
internal/pkg/apiclient/data.go Просмотреть файл

@@ -5,27 +5,31 @@ import (
"fmt"
"net/http"

"github.com/AFASystems/presence/internal/pkg/config"
"github.com/AFASystems/presence/internal/pkg/model"
)

func GetTrackers(token string, client *http.Client) ([]model.Tracker, error) {
res, err := getRequest(token, "getTrackers", client)
func GetTrackers(token string, client *http.Client, cfg *config.Config) ([]model.Tracker, error) {
res, err := getRequest(token, "getTrackers", client, cfg)
if err != nil {
fmt.Printf("error get trackers: %+v\n", err)
return []model.Tracker{}, err
}

var i []model.Tracker
err = json.NewDecoder(res.Body).Decode(&i)
if err != nil {
fmt.Printf("error decode trackers: %+v\n", err)
return []model.Tracker{}, err
}

return i, nil
}

func GetGateways(token string, client *http.Client) ([]model.Gateway, error) {
res, err := getRequest(token, "getGateways", client)
func GetGateways(token string, client *http.Client, cfg *config.Config) ([]model.Gateway, error) {
res, err := getRequest(token, "getGateways", client, cfg)
if err != nil {
fmt.Printf("error get gateways: %+v\n", err)
return []model.Gateway{}, err
}

@@ -38,8 +42,8 @@ func GetGateways(token string, client *http.Client) ([]model.Gateway, error) {
return i, nil
}

func GetTrackerZones(token string, client *http.Client) ([]model.TrackerZones, error) {
res, err := getRequest(token, "getTrackerZones", client)
func GetTrackerZones(token string, client *http.Client, cfg *config.Config) ([]model.TrackerZones, error) {
res, err := getRequest(token, "getTrackerZones", client, cfg)
if err != nil {
return []model.TrackerZones{}, err
}
@@ -53,8 +57,8 @@ func GetTrackerZones(token string, client *http.Client) ([]model.TrackerZones, e
return i, nil
}

func GetZones(token string, client *http.Client) ([]model.Zone, error) {
res, err := getRequest(token, "getZones", client)
func GetZones(token string, client *http.Client, cfg *config.Config) ([]model.Zone, error) {
res, err := getRequest(token, "getZones", client, cfg)
if err != nil {
return []model.Zone{}, err
}
@@ -68,35 +72,28 @@ func GetZones(token string, client *http.Client) ([]model.Zone, error) {
return i, nil
}

func GetTracks(token string, client *http.Client) ([]model.Tracks, error) {
res, err := getRequest(token, "getTracks", client)
if err != nil {
return []model.Tracks{}, err
}

var i []model.Tracks
err = json.NewDecoder(res.Body).Decode(&i)
func InferPosition(token string, client *http.Client, cfg *config.Config) (model.PositionResponse, error) {
url := fmt.Sprintf("%s/ble-ai/infer", cfg.APIBaseURL)
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return []model.Tracks{}, err
fmt.Printf("error new request: %+v\n", err)
return model.PositionResponse{}, err
}

return i, nil
}
setHeader(req, token)

func getRequest(token, route string, client *http.Client) (*http.Response, error) {
url := fmt.Sprintf("https://10.251.0.30:5050/reslevis/%s", route)
req, err := http.NewRequest("GET", url, nil)
res, err := client.Do(req)
if err != nil {
return nil, err
fmt.Printf("error do request: %+v\n", err)
return model.PositionResponse{}, err
}

header := fmt.Sprintf("Bearer %s", token)

req.Header.Add("Authorization", header)
res, err := client.Do(req)
var i model.PositionResponse
err = json.NewDecoder(res.Body).Decode(&i)
if err != nil {
return nil, err
fmt.Printf("error decode response: %+v\n", err)
return model.PositionResponse{}, err
}

return res, nil
return i, nil
}

+ 19
- 12
internal/pkg/apiclient/updatedb.go Просмотреть файл

@@ -4,6 +4,7 @@ import (
"context"
"crypto/tls"
"fmt"
"log/slog"
"net/http"
"reflect"

@@ -27,10 +28,11 @@ func UpdateDB(db *gorm.DB, ctx context.Context, cfg *config.Config, writer *kafk
return err
}

if trackers, err := GetTrackers(token, client); err == nil {
if trackers, err := GetTrackers(token, client, cfg); err == nil {
syncTable(db, trackers)
if err := controller.SendKafkaMessage(writer, &model.ApiUpdate{Method: "DELETE", MAC: "all"}, ctx); err != nil {
fmt.Printf("Error in sending delete all from lookup message: %v", err)
msg := fmt.Sprintf("Error in sending delete all from lookup message: %v", err)
slog.Error(msg)
}

for _, v := range trackers {
@@ -41,32 +43,37 @@ func UpdateDB(db *gorm.DB, ctx context.Context, cfg *config.Config, writer *kafk
}

if err := controller.SendKafkaMessage(writer, &apiUpdate, ctx); err != nil {
fmt.Printf("Error in sending POST kafka message: %v", err)
msg := fmt.Sprintf("Error in sending POST kafka message: %v", err)
slog.Error(msg)
}
}
}

if gateways, err := GetGateways(token, client); err == nil {
if gateways, err := GetGateways(token, client, cfg); err == nil {
syncTable(db, gateways)
}

// if tracks, err := GetTracks(token, client); err == nil {
// fmt.Printf("Tracks: %+v\n", tracks)
// syncTable(db, tracks)
// }

if zones, err := GetZones(token, client); err == nil {
if zones, err := GetZones(token, client, cfg); err == nil {
syncTable(db, zones)
}

if trackerZones, err := GetTrackerZones(token, client); err == nil {
if trackerZones, err := GetTrackerZones(token, client, cfg); err == nil {
syncTable(db, trackerZones)
}

if inferredPosition, err := InferPosition(token, client, cfg); err == nil {
for _, v := range inferredPosition.Items {
mac := convertMac(v.Mac)
fmt.Println(mac)
db.Model(&model.Tracker{}).Where("mac = ?", mac).Update("x", v.X).Update("y", v.Y)
}
}

var settings model.Settings
db.First(&settings)
if settings.ID == 0 {
fmt.Println("settings are empty")
msg := "settings are empty"
slog.Info(msg)
db.Create(appState.GetSettings())
}



+ 28
- 0
internal/pkg/apiclient/utils.go Просмотреть файл

@@ -0,0 +1,28 @@
package apiclient

import (
"fmt"
"net/http"
"strings"

"github.com/AFASystems/presence/internal/pkg/config"
)

func setHeader(req *http.Request, token string) {
req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", token))
}

// getRequest issues an authorized GET against the configured reslevis
// API route and returns the raw HTTP response; the caller owns Body.
func getRequest(token, route string, client *http.Client, cfg *config.Config) (*http.Response, error) {
	endpoint := fmt.Sprintf("%s/reslevis/%s", cfg.APIBaseURL, route)

	req, err := http.NewRequest("GET", endpoint, nil)
	if err != nil {
		return nil, err
	}
	setHeader(req, token)

	return client.Do(req)
}

// convertMac normalizes a MAC address by stripping ":" separators and
// upper-casing the hex digits ("aa:bb" -> "AABB").
func convertMac(mac string) string {
	return strings.ReplaceAll(strings.ToUpper(mac), ":", "")
}

+ 76
- 0
internal/pkg/bridge/handler.go Просмотреть файл

@@ -0,0 +1,76 @@
package bridge

import (
"context"
"encoding/json"
"log/slog"
"strings"
"time"

"github.com/AFASystems/presence/internal/pkg/model"
"github.com/segmentio/kafka-go"
)

// RawBeaconWriter writes beacon advertisements to the rawbeacons topic.
// Kept as a narrow interface so HandleMQTTMessage can be exercised with
// a fake writer; *kafka.Writer satisfies it.
type RawBeaconWriter interface {
	WriteMessages(ctx context.Context, msgs ...kafka.Message) error
}

// BeaconLookup resolves a beacon MAC address to its tracker ID,
// reporting whether the MAC is known (e.g. satisfied by AppState).
type BeaconLookup interface {
	BeaconExists(mac string) (id string, ok bool)
}

// HandleMQTTMessage processes one MQTT message from a gateway.
//
// Payloads starting with '[' are decoded as a JSON array of
// model.RawReading; every non-gateway reading whose MAC is known to the
// lookup is converted into a model.BeaconAdvertisement and written to
// the rawbeacons writer. Any other payload is treated as CSV and only
// validated for a minimum column count. The gateway hostname is the
// topic suffix ("publish_out/gateway1" -> "gateway1"); topics without
// "/" yield an empty hostname.
//
// Fix over the original: a marshal failure for one reading no longer
// aborts the remaining readings in the batch (was `break`, now
// `continue`). A Kafka write failure still aborts the batch after a
// short back-off, since broker trouble likely affects every write.
func HandleMQTTMessage(topic string, payload []byte, lookup BeaconLookup, writer RawBeaconWriter) {
	parts := strings.SplitN(topic, "/", 2)
	hostname := ""
	if len(parts) >= 2 {
		hostname = parts[1]
	}

	msgStr := string(payload)
	if strings.HasPrefix(msgStr, "[") {
		var readings []model.RawReading
		if err := json.Unmarshal(payload, &readings); err != nil {
			slog.Error("parsing MQTT JSON", "err", err, "topic", topic)
			return
		}
		for _, reading := range readings {
			if reading.Type == "Gateway" {
				continue
			}
			id, ok := lookup.BeaconExists(reading.MAC)
			if !ok {
				continue
			}
			adv := model.BeaconAdvertisement{
				ID:       id,
				Hostname: hostname,
				MAC:      reading.MAC,
				RSSI:     int64(reading.RSSI),
				Data:     reading.RawData,
			}
			encoded, err := json.Marshal(adv)
			if err != nil {
				// One bad reading should not drop the rest of the batch.
				slog.Error("marshaling beacon advertisement", "err", err)
				continue
			}
			if err := writer.WriteMessages(context.Background(), kafka.Message{Value: encoded}); err != nil {
				slog.Error("writing to Kafka", "err", err)
				time.Sleep(1 * time.Second)
				break
			}
		}
		return
	}

	// CSV format: validate minimum fields only; full parsing can be added later.
	fields := strings.Split(msgStr, ",")
	if len(fields) < 6 {
		slog.Error("invalid CSV MQTT message", "topic", topic, "message", msgStr)
		return
	}
	slog.Debug("CSV MQTT message received", "topic", topic, "fields", len(fields))
}

+ 61
- 0
internal/pkg/bridge/mqtt.go Просмотреть файл

@@ -0,0 +1,61 @@
package bridge

import (
"fmt"
"log/slog"

"github.com/AFASystems/presence/internal/pkg/config"
mqtt "github.com/eclipse/paho.mqtt.golang"
"github.com/google/uuid"
)

// defaultMQTTPort is the broker TCP port used when dialing.
const defaultMQTTPort = 1883

// subscribeTopic matches every gateway publish channel ("publish_out/<gateway>").
const subscribeTopic = "publish_out/#"

// disconnectQuiesceMs is how long Disconnect waits (in milliseconds) for
// in-flight work before closing the connection.
const disconnectQuiesceMs = 250

// MQTTClient wraps a connected paho MQTT client; construct with
// NewMQTTClient, then Subscribe, and Disconnect on shutdown.
type MQTTClient struct {
	Client mqtt.Client // underlying paho client, already connected
}

// NewMQTTClient dials the broker named in cfg and returns a connected
// wrapper. A connect failure is returned as an error (not a panic);
// after the initial connect, reconnects are handled automatically by
// the paho client.
func NewMQTTClient(cfg *config.Config, publishHandler func(mqtt.Message)) (*MQTTClient, error) {
	broker := fmt.Sprintf("tcp://%s:%d", cfg.MQTTHost, defaultMQTTPort)

	opts := mqtt.NewClientOptions().
		AddBroker(broker).
		SetClientID(fmt.Sprintf("bridge-%s", uuid.New().String())).
		SetAutoReconnect(true).
		SetConnectRetry(true).
		SetConnectRetryInterval(config.SMALL_TICKER_INTERVAL).
		SetMaxReconnectInterval(config.LARGE_TICKER_INTERVAL).
		SetCleanSession(false).
		SetDefaultPublishHandler(func(_ mqtt.Client, m mqtt.Message) {
			publishHandler(m)
		}).
		SetOnConnectHandler(func(mqtt.Client) {
			slog.Info("MQTT connected")
		}).
		SetConnectionLostHandler(func(_ mqtt.Client, err error) {
			slog.Error("MQTT connection lost", "err", err)
		})

	client := mqtt.NewClient(opts)
	token := client.Connect()
	token.Wait()
	if err := token.Error(); err != nil {
		return nil, fmt.Errorf("mqtt connect: %w", err)
	}

	return &MQTTClient{Client: client}, nil
}

// Subscribe subscribes to the bridge's wildcard topic at QoS 1 using
// the client's default publish handler, and logs the outcome. Errors
// are logged rather than returned to preserve the original signature.
//
// Fix over the original: token.Error() is now checked after Wait — the
// previous version logged "subscribed" even when the broker rejected
// the subscription.
func (m *MQTTClient) Subscribe() {
	token := m.Client.Subscribe(subscribeTopic, 1, nil)
	token.Wait()
	if err := token.Error(); err != nil {
		slog.Error("MQTT subscribe", "topic", subscribeTopic, "err", err)
		return
	}
	slog.Info("MQTT subscribed", "topic", subscribeTopic)
}

// Disconnect closes the MQTT connection, allowing up to
// disconnectQuiesceMs milliseconds for in-flight work to complete.
func (m *MQTTClient) Disconnect() {
	m.Client.Disconnect(disconnectQuiesceMs)
	slog.Info("MQTT disconnected")
}

+ 23
- 28
internal/pkg/common/appcontext/context.go Просмотреть файл

@@ -2,6 +2,7 @@ package appcontext

import (
"fmt"
"log/slog"

"github.com/AFASystems/presence/internal/pkg/model"
"github.com/mitchellh/mapstructure"
@@ -10,10 +11,9 @@ import (
// AppState provides centralized access to application state
type AppState struct {
beacons model.BeaconsList
httpResults model.HTTPResultList
settings model.Settings
beaconEvents model.BeaconEventList
beaconsLookup map[string]string
beaconsLookup model.BeaconsLookup
}

// NewAppState creates a new application context AppState with default values
@@ -22,9 +22,6 @@ func NewAppState() *AppState {
beacons: model.BeaconsList{
Beacons: make(map[string]model.Beacon),
},
httpResults: model.HTTPResultList{
Results: make(map[string]model.HTTPResult),
},
settings: model.Settings{
ID: 1,
CurrentAlgorithm: "filter", // possible values filter or AI
@@ -39,12 +36,16 @@ func NewAppState() *AppState {
beaconEvents: model.BeaconEventList{
Beacons: make(map[string]model.BeaconEvent),
},
beaconsLookup: make(map[string]string),
beaconsLookup: model.BeaconsLookup{
Lookup: make(map[string]string),
},
}
}

// GetBeacons returns thread-safe access to beacons list
func (m *AppState) GetBeacons() *model.BeaconsList {
m.beacons.Lock.RLock()
defer m.beacons.Lock.RUnlock()
return &m.beacons
}

@@ -55,43 +56,36 @@ func (m *AppState) GetSettings() *model.Settings {

// GetBeaconEvents returns thread-safe access to beacon events
func (m *AppState) GetBeaconEvents() *model.BeaconEventList {
m.beaconEvents.Lock.RLock()
defer m.beaconEvents.Lock.RUnlock()
return &m.beaconEvents
}

// GetBeaconsLookup returns thread-safe access to beacon lookup map
func (m *AppState) GetBeaconsLookup() map[string]string {
return m.beaconsLookup
}

// AddBeaconToLookup adds a beacon ID to the lookup map
func (m *AppState) AddBeaconToLookup(id, value string) {
m.beaconsLookup[id] = value
m.beaconsLookup.Lock.Lock()
m.beaconsLookup.Lookup[id] = value
m.beaconsLookup.Lock.Unlock()
}

// RemoveBeaconFromLookup removes a beacon ID from the lookup map
func (m *AppState) RemoveBeaconFromLookup(id string) {
delete(m.beaconsLookup, id)
m.beaconsLookup.Lock.Lock()
delete(m.beaconsLookup.Lookup, id)
m.beaconsLookup.Lock.Unlock()
}

func (m *AppState) CleanLookup() {
clear(m.beaconsLookup)
}

func (m *AppState) RemoveBeacon(id string) {
m.beacons.Lock.Lock()
delete(m.beacons.Beacons, id)
m.beacons.Lock.Unlock()
}

func (m *AppState) RemoveHTTPResult(id string) {
m.httpResults.Lock.Lock()
delete(m.httpResults.Results, id)
m.httpResults.Lock.Unlock()
m.beaconsLookup.Lock.Lock()
clear(m.beaconsLookup.Lookup)
m.beaconsLookup.Lock.Unlock()
}

// BeaconExists checks if a beacon exists in the lookup
func (m *AppState) BeaconExists(id string) (string, bool) {
val, exists := m.beaconsLookup[id]
m.beaconsLookup.Lock.RLock()
defer m.beaconsLookup.Lock.RUnlock()
val, exists := m.beaconsLookup.Lookup[id]
return val, exists
}

@@ -157,6 +151,7 @@ func (m *AppState) GetSettingsValue() model.Settings {
// UpdateSettings updates the system settings (thread-safe)
func (m *AppState) UpdateSettings(settings map[string]any) {
if err := mapstructure.Decode(settings, &m.settings); err != nil {
fmt.Printf("Error in persisting settings: %v\n", err)
msg := fmt.Sprintf("Error in persisting settings: %v", err)
slog.Error(msg)
}
}

+ 92
- 12
internal/pkg/config/config.go Просмотреть файл

@@ -1,6 +1,10 @@
package config

import "os"
import (
"fmt"
"os"
"time"
)

type Config struct {
HTTPAddr string
@@ -19,9 +23,13 @@ type Config struct {
HTTPUsername string
HTTPPassword string
HTTPAudience string
ConfigPath string
APIBaseURL string
APIAuthURL string
// TLSInsecureSkipVerify enables skipping TLS cert verification (e.g. for dev); default false.
TLSInsecureSkipVerify bool
}

// getEnv returns env var value or a default if not set.
func getEnv(key, def string) string {
if v := os.Getenv(key); v != "" {
return v
@@ -29,23 +37,95 @@ func getEnv(key, def string) string {
return def
}

func getEnvBool(key string, defaultVal bool) bool {
switch os.Getenv(key) {
case "1", "true", "TRUE", "yes":
return true
case "0", "false", "FALSE", "no":
return false
}
return defaultVal
}

func getEnvPanic(key string) string {
if v := os.Getenv(key); v != "" {
return v
}
panic(fmt.Sprintf("environment variable %s is not set", key))
}

func Load() *Config {
return &Config{
HTTPAddr: getEnv("HTTP_HOST_PATH", "0.0.0.0:1902"),
WSAddr: getEnv("HTTPWS_HOST_PATH", "0.0.0.0:8088"),
MQTTHost: getEnv("MQTT_HOST", "192.168.1.101"),
MQTTUser: getEnv("MQTT_USERNAME", "user"),
MQTTPass: getEnv("MQTT_PASSWORD", "pass"),
MQTTClientID: getEnv("MQTT_CLIENT_ID", "presence-detector"),
MQTTUser: getEnvPanic("MQTT_USERNAME"),
MQTTPass: getEnvPanic("MQTT_PASSWORD"),
MQTTClientID: getEnvPanic("MQTT_CLIENT_ID"),
KafkaURL: getEnv("KAFKA_URL", "127.0.0.1:9092"),
DBHost: getEnv("DBHost", "127.0.0.1"),
DBUser: getEnv("DBUser", "postgres"),
DBPass: getEnv("DBPass", "postgres"),
DBUser: getEnvPanic("DBUser"),
DBPass: getEnvPanic("DBPass"),
DBName: getEnv("DBName", "go_crud_db"),
HTTPClientID: getEnv("HTTPClientID", "Fastapi"),
ClientSecret: getEnv("ClientSecret", "wojuoB7Z5xhlPFrF2lIxJSSdVHCApEgC"),
HTTPUsername: getEnv("HTTPUsername", "core"),
HTTPPassword: getEnv("HTTPPassword", "C0r3_us3r_Cr3d3nt14ls"),
HTTPAudience: getEnv("HTTPAudience", "Fastapi"),
HTTPClientID: getEnvPanic("HTTPClientID"),
ClientSecret: getEnvPanic("ClientSecret"),
HTTPUsername: getEnvPanic("HTTPUsername"),
HTTPPassword: getEnvPanic("HTTPPassword"),
HTTPAudience: getEnvPanic("HTTPAudience"),
ConfigPath: getEnv("CONFIG_PATH", "/app/cmd/server/config.json"),
APIBaseURL: getEnv("API_BASE_URL", "https://10.251.0.30:5050"),
TLSInsecureSkipVerify: getEnvBool("TLS_INSECURE_SKIP_VERIFY", false),
}
}

func LoadDecoder() *Config {
return &Config{
KafkaURL: getEnv("KAFKA_URL", "127.0.0.1:9092"),
}
}

func LoadServer() *Config {
return &Config{
KafkaURL: getEnv("KAFKA_URL", "127.0.0.1:9092"),
HTTPAddr: getEnv("HTTP_HOST_PATH", "0.0.0.0:1902"),
DBHost: getEnv("DBHost", "127.0.0.1"),
DBUser: getEnvPanic("DBUser"),
DBPass: getEnvPanic("DBPass"),
DBName: getEnv("DBName", "go_crud_db"),
HTTPClientID: getEnvPanic("HTTPClientID"),
ClientSecret: getEnvPanic("ClientSecret"),
HTTPUsername: getEnvPanic("HTTPUsername"),
HTTPPassword: getEnvPanic("HTTPPassword"),
HTTPAudience: getEnvPanic("HTTPAudience"),
ConfigPath: getEnv("CONFIG_PATH", "/app/cmd/server/config.json"),
APIBaseURL: getEnv("API_BASE_URL", "https://10.251.0.30:5050"),
APIAuthURL: getEnv("API_AUTH_URL", "https://10.251.0.30:10002"),
TLSInsecureSkipVerify: getEnvBool("TLS_INSECURE_SKIP_VERIFY", false),
}
}

// LoadBridge builds the configuration for the MQTT-to-Kafka bridge.
// MQTT credentials and the client ID are mandatory (getEnvPanic aborts when
// absent); broker addresses fall back to development defaults.
func LoadBridge() *Config {
	var cfg Config
	cfg.KafkaURL = getEnv("KAFKA_URL", "127.0.0.1:9092")
	cfg.MQTTHost = getEnv("MQTT_HOST", "192.168.1.101")
	cfg.MQTTUser = getEnvPanic("MQTT_USERNAME")
	cfg.MQTTPass = getEnvPanic("MQTT_PASSWORD")
	cfg.MQTTClientID = getEnvPanic("MQTT_CLIENT_ID")
	return &cfg
}

// LoadLocation builds the configuration for the location service: the Kafka
// broker address plus the TLS-verification toggle used for outbound API calls.
func LoadLocation() *Config {
	var cfg Config
	cfg.KafkaURL = getEnv("KAFKA_URL", "127.0.0.1:9092")
	cfg.TLSInsecureSkipVerify = getEnvBool("TLS_INSECURE_SKIP_VERIFY", false)
	return &cfg
}

// Shared channel capacities and ticker intervals used by the services.
// NOTE(review): ALL_CAPS names are un-idiomatic Go (MixedCaps is conventional),
// but renaming would touch every caller, so they are left unchanged here.
const (
	// Buffered-channel capacities, sized by expected message volume.
	SMALL_CHANNEL_SIZE = 200
	MEDIUM_CHANNEL_SIZE = 500
	LARGE_CHANNEL_SIZE = 2000
	// Periodic-work intervals for the corresponding workloads.
	SMALL_TICKER_INTERVAL = 1 * time.Second
	MEDIUM_TICKER_INTERVAL = 2 * time.Second
	LARGE_TICKER_INTERVAL = 5 * time.Second
)

+ 7
- 3
internal/pkg/controller/parser_controller.go Просмотреть файл

@@ -4,6 +4,7 @@ import (
"context"
"encoding/json"
"fmt"
"log/slog"
"net/http"

"github.com/AFASystems/presence/internal/pkg/model"
@@ -32,7 +33,8 @@ func ParserAddController(db *gorm.DB, writer *kafka.Writer, ctx context.Context)

if err := service.SendParserConfig(kp, writer, ctx); err != nil {
http.Error(w, "Unable to send parser config to kafka broker", 400)
fmt.Printf("Unable to send parser config to kafka broker %v\n", err)
msg := fmt.Sprintf("Unable to send parser config to kafka broker %v", err)
slog.Error(msg)
return
}

@@ -69,7 +71,8 @@ func ParserDeleteController(db *gorm.DB, writer *kafka.Writer, ctx context.Conte

if err := service.SendParserConfig(kp, writer, ctx); err != nil {
http.Error(w, "Unable to send parser config to kafka broker", 400)
fmt.Printf("Unable to send parser config to kafka broker %v\n", err)
msg := fmt.Sprintf("Unable to send parser config to kafka broker %v", err)
slog.Error(msg)
return
}

@@ -103,7 +106,8 @@ func ParserUpdateController(db *gorm.DB, writer *kafka.Writer, ctx context.Conte
db.Save(&config)
if err := service.SendParserConfig(kp, writer, ctx); err != nil {
http.Error(w, "Unable to send parser config to kafka broker", 400)
fmt.Printf("Unable to send parser config to kafka broker %v\n", err)
msg := fmt.Sprintf("Unable to send parser config to kafka broker %v", err)
slog.Error(msg)
return
}



+ 15
- 5
internal/pkg/controller/settings_controller.go Просмотреть файл

@@ -4,6 +4,7 @@ import (
"context"
"encoding/json"
"fmt"
"log/slog"
"net/http"

"github.com/AFASystems/presence/internal/pkg/model"
@@ -33,9 +34,12 @@ func SettingsUpdateController(db *gorm.DB, writer *kafka.Writer, ctx context.Con
return
}

fmt.Printf("updates: %+v\n", updates)
inMsg := fmt.Sprintf("updates: %+v", updates)
slog.Info(inMsg)

if err := db.Model(&model.Settings{}).Where("id = ?", 1).Updates(updates).Error; err != nil {
msg := fmt.Sprintf("Error in updating settings: %v", err)
slog.Error(msg)
http.Error(w, err.Error(), 500)
return
}
@@ -43,16 +47,22 @@ func SettingsUpdateController(db *gorm.DB, writer *kafka.Writer, ctx context.Con
eMsg, err := json.Marshal(updates)
if err != nil {
http.Error(w, "Error in marshaling settings updates", 400)
msg := fmt.Sprintf("Error in marshaling settings updates: %v", err)
slog.Error(msg)
return
}

msg := kafka.Message{
Value: eMsg,
}

fmt.Printf("Kafka message: %+v\n", eMsg)

writer.WriteMessages(ctx, msg)
if err := writer.WriteMessages(ctx, msg); err != nil {
slog.Error("writing settings to Kafka", "err", err)
http.Error(w, "Failed to publish settings update", 500)
return
}

w.Write([]byte("Settings updated"))
w.Header().Set("Content-Type", "application/json")
w.Write([]byte(`{"status":"Settings updated"}`))
}
}

+ 12
- 6
internal/pkg/controller/trackers_controller.go Просмотреть файл

@@ -4,6 +4,7 @@ import (
"context"
"encoding/json"
"fmt"
"log/slog"
"net/http"

"github.com/AFASystems/presence/internal/pkg/model"
@@ -15,7 +16,8 @@ import (
func SendKafkaMessage(writer *kafka.Writer, value *model.ApiUpdate, ctx context.Context) error {
valueStr, err := json.Marshal(&value)
if err != nil {
fmt.Println("error in encoding: ", err)
msg := fmt.Sprintf("error in encoding: %v", err)
slog.Error(msg)
return err
}
msg := kafka.Message{
@@ -23,7 +25,8 @@ func SendKafkaMessage(writer *kafka.Writer, value *model.ApiUpdate, ctx context.
}

if err := writer.WriteMessages(ctx, msg); err != nil {
fmt.Println("Error in sending kafka message: ", err)
msg := fmt.Sprintf("Error in sending kafka message: %v", err)
slog.Error(msg)
return err
}

@@ -46,7 +49,8 @@ func TrackerAdd(db *gorm.DB, writer *kafka.Writer, ctx context.Context) http.Han
}

if err := SendKafkaMessage(writer, &apiUpdate, ctx); err != nil {
fmt.Println("error in sending Kafka POST message")
msg := "error in sending Kafka POST message"
slog.Error(msg)
http.Error(w, "Error in sending kafka message", 500)
return
}
@@ -101,13 +105,15 @@ func TrackerDelete(db *gorm.DB, writer *kafka.Writer, ctx context.Context) http.

apiUpdate := model.ApiUpdate{
Method: "DELETE",
ID: tracker.ID,
MAC: tracker.MAC,
}

fmt.Printf("Sending DELETE tracker id: %s message\n", id)
msg := fmt.Sprintf("Sending DELETE tracker id: %s message", id)
slog.Info(msg)

if err := SendKafkaMessage(writer, &apiUpdate, ctx); err != nil {
fmt.Println("error in sending Kafka DELETE message")
msg := "error in sending Kafka DELETE message"
slog.Error(msg)
http.Error(w, "Error in sending kafka message", 500)
return
}


+ 3
- 3
internal/pkg/database/database.go Просмотреть файл

@@ -2,6 +2,7 @@ package database

import (
"fmt"
"log/slog"

"github.com/AFASystems/presence/internal/pkg/config"
"github.com/AFASystems/presence/internal/pkg/model"
@@ -9,8 +10,6 @@ import (
"gorm.io/gorm"
)

var DB *gorm.DB

func Connect(cfg *config.Config) (*gorm.DB, error) {
// Connect to PostgreSQL database
dsn := fmt.Sprintf(
@@ -30,6 +29,7 @@ func Connect(cfg *config.Config) (*gorm.DB, error) {
return nil, err
}

fmt.Println("Database connection established")
msg := "Database connection established"
slog.Info(msg)
return db, nil
}

+ 71
- 0
internal/pkg/decoder/process.go Просмотреть файл

@@ -0,0 +1,71 @@
package decoder

import (
"bytes"
"context"
"encoding/hex"
"fmt"
"log/slog"
"strings"

"github.com/AFASystems/presence/internal/pkg/common/appcontext"
"github.com/AFASystems/presence/internal/pkg/common/utils"
"github.com/AFASystems/presence/internal/pkg/model"
"github.com/segmentio/kafka-go"
)

// AlertWriter writes decoded beacon events (e.g. to the alertbeacons topic).
// Its single method matches (*kafka.Writer).WriteMessages, so a real Kafka
// writer satisfies it directly and tests can substitute a mock.
type AlertWriter interface {
	WriteMessages(ctx context.Context, msgs ...kafka.Message) error
}

// ProcessIncoming decodes one beacon advertisement and forwards the resulting
// event to the alert writer. Decode/write failures are logged (with the
// advertisement ID) rather than propagated.
func ProcessIncoming(adv model.BeaconAdvertisement, appState *appcontext.AppState, writer AlertWriter, registry *model.ParserRegistry) {
	err := DecodeBeacon(adv, appState, writer, registry)
	if err == nil {
		return
	}
	slog.Error("decoding beacon", "err", err, "id", adv.ID)
}

// DecodeBeacon hex-decodes the payload, runs the parser registry, dedupes by
// event hash, and writes the event to writer only when it changed.
//
// Flow: trim/hex-decode adv.Data, strip flag bytes, locate AD structures and
// run the registered parsers over them; an event with an empty ID means no
// parser matched and the advertisement is silently dropped. The previous event
// for this ID is fetched and the new one stored, then their hashes are
// compared so only changed events reach the writer.
//
// Returns nil on empty/duplicate/unparsed input; hex-decode, JSON-marshal and
// Kafka-write errors are returned to the caller (ProcessIncoming logs them).
func DecodeBeacon(adv model.BeaconAdvertisement, appState *appcontext.AppState, writer AlertWriter, registry *model.ParserRegistry) error {
	beacon := strings.TrimSpace(adv.Data)
	id := adv.ID
	if beacon == "" {
		// Nothing to decode; not an error.
		return nil
	}

	b, err := hex.DecodeString(beacon)
	if err != nil {
		return err
	}

	b = utils.RemoveFlagBytes(b)
	indices := utils.ParseADFast(b)
	event := utils.LoopADStructures(b, indices, id, registry)

	if event.ID == "" {
		// No registered parser recognized this advertisement.
		return nil
	}

	prevEvent, ok := appState.GetBeaconEvent(id)
	appState.UpdateBeaconEvent(id, event)

	// NOTE(review): BtnPressed is set AFTER the event was stored above, so the
	// stored copy and the emitted copy differ for iBeacons, and the hash
	// comparison below mixes the two — confirm this ordering is intentional.
	if event.Type == "iBeacon" {
		event.BtnPressed = true
	}

	// Deduplicate: skip the write when the event hash is unchanged.
	if ok && bytes.Equal(prevEvent.Hash(), event.Hash()) {
		return nil
	}

	eMsg, err := event.ToJSON()
	if err != nil {
		return err
	}

	// NOTE(review): uses context.Background() rather than a caller context, so
	// the write cannot be cancelled during shutdown — verify this is desired.
	if err := writer.WriteMessages(context.Background(), kafka.Message{Value: eMsg}); err != nil {
		return fmt.Errorf("write alert: %w", err)
	}

	return nil
}

+ 7
- 3
internal/pkg/kafkaclient/consumer.go Просмотреть файл

@@ -4,6 +4,7 @@ import (
"context"
"encoding/json"
"fmt"
"log/slog"
"sync"

"github.com/segmentio/kafka-go"
@@ -14,18 +15,21 @@ func Consume[T any](r *kafka.Reader, ch chan<- T, ctx context.Context, wg *sync.
for {
select {
case <-ctx.Done():
fmt.Println("consumer closed")
msg := "consumer closed"
slog.Info(msg)
return
default:
msg, err := r.ReadMessage(ctx)
if err != nil {
fmt.Println("error reading message:", err)
msg := fmt.Sprintf("error reading message: %v", err)
slog.Error(msg)
continue
}

var data T
if err := json.Unmarshal(msg.Value, &data); err != nil {
fmt.Println("error decoding:", err)
msg := fmt.Sprintf("error decoding: %v", err)
slog.Error(msg)
continue
}



+ 15
- 9
internal/pkg/kafkaclient/manager.go Просмотреть файл

@@ -2,6 +2,7 @@ package kafkaclient

import (
"fmt"
"log/slog"
"strings"
"sync"
"time"
@@ -52,15 +53,18 @@ func (m *KafkaManager) AddKafkaWriter(kafkaUrl, topic string) {
}

func (m *KafkaManager) CleanKafkaWriters() {
fmt.Println("shutdown of kafka readers starts")
msg := "shutdown of kafka writers starts"
slog.Info(msg)
m.kafkaWritersMap.KafkaWritersLock.Lock()
for _, r := range m.kafkaWritersMap.KafkaWriters {
if err := r.Close(); err != nil {
fmt.Printf("Error in closing kafka writer %v", err)
msg := fmt.Sprintf("Error in closing kafka writer %v", err)
slog.Error(msg)
}
}
m.kafkaWritersMap.KafkaWritersLock.Unlock()
fmt.Println("Kafka writers graceful shutdown complete")
msg = "Kafka writers graceful shutdown complete"
slog.Info(msg)
}

func (m *KafkaManager) AddKafkaReader(kafkaUrl, topic, groupID string) {
@@ -82,11 +86,13 @@ func (m *KafkaManager) CleanKafkaReaders() {
m.kafkaReadersMap.KafkaReadersLock.Lock()
for _, r := range m.kafkaReadersMap.KafkaReaders {
if err := r.Close(); err != nil {
fmt.Printf("Error in closing kafka reader %v", err)
msg := fmt.Sprintf("Error in closing kafka reader %v", err)
slog.Error(msg)
}
}
m.kafkaReadersMap.KafkaReadersLock.Unlock()
fmt.Println("Kafka readers graceful shutdown complete")
msg := "Kafka readers graceful shutdown complete"
slog.Info(msg)
}

func (m *KafkaManager) PopulateKafkaManager(url, name string, topics []string) {
@@ -101,13 +107,13 @@ func (m *KafkaManager) PopulateKafkaManager(url, name string, topics []string) {
}

func (m *KafkaManager) GetReader(topic string) *kafka.Reader {
m.kafkaReadersMap.KafkaReadersLock.Lock()
defer m.kafkaReadersMap.KafkaReadersLock.Unlock()
m.kafkaReadersMap.KafkaReadersLock.RLock()
defer m.kafkaReadersMap.KafkaReadersLock.RUnlock()
return m.kafkaReadersMap.KafkaReaders[topic]
}

func (m *KafkaManager) GetWriter(topic string) *kafka.Writer {
m.kafkaWritersMap.KafkaWritersLock.Lock()
defer m.kafkaWritersMap.KafkaWritersLock.Unlock()
m.kafkaWritersMap.KafkaWritersLock.RLock()
defer m.kafkaWritersMap.KafkaWritersLock.RUnlock()
return m.kafkaWritersMap.KafkaWriters[topic]
}

+ 51
- 0
internal/pkg/location/assign.go Просмотреть файл

@@ -0,0 +1,51 @@
package location

import (
"log/slog"
"time"

"github.com/AFASystems/presence/internal/pkg/common/appcontext"
"github.com/AFASystems/presence/internal/pkg/common/utils"
"github.com/AFASystems/presence/internal/pkg/model"
)

// AssignBeaconToList folds a new beacon advertisement into app state: it
// refreshes the beacon's last-seen timestamp, stores the raw advertisement,
// and appends a metric (distance, RSSI, reporting gateway, timestamp) to the
// beacon's fixed-size sliding window of recent readings.
//
// Advertisements below settings.RSSIMinThreshold are dropped when
// RSSIEnforceThreshold is enabled. A beacon unseen before is created on the fly.
func AssignBeaconToList(adv model.BeaconAdvertisement, appState *appcontext.AppState) {
	id := adv.ID
	now := time.Now().Unix()
	settings := appState.GetSettingsValue()

	if settings.RSSIEnforceThreshold && int64(adv.RSSI) < settings.RSSIMinThreshold {
		slog.Debug("settings RSSI threshold filter", "id", id)
		return
	}

	beacon, ok := appState.GetBeacon(id)
	if !ok {
		beacon = model.Beacon{ID: id}
	}

	beacon.IncomingJSON = adv
	beacon.LastSeen = now

	if beacon.BeaconMetrics == nil {
		beacon.BeaconMetrics = make([]model.BeaconMetric, 0, settings.BeaconMetricSize)
	}

	metric := model.BeaconMetric{
		Distance:  utils.CalculateDistance(adv),
		Timestamp: now,
		RSSI:      int64(adv.RSSI),
		Location:  adv.Hostname,
	}

	// Keep at most BeaconMetricSize entries, dropping the oldest first.
	// Fix over the previous shift-in-place version: if BeaconMetricSize shrank
	// at runtime, the slice could be longer than the window, so writing at
	// index size-1 left stale duplicate entries in the tail (and size<=0
	// indexed out of bounds). Trimming before appending handles both cases.
	if size := settings.BeaconMetricSize; size > 0 && len(beacon.BeaconMetrics) >= size {
		excess := len(beacon.BeaconMetrics) - size + 1
		beacon.BeaconMetrics = append(beacon.BeaconMetrics[:0], beacon.BeaconMetrics[excess:]...)
	}
	beacon.BeaconMetrics = append(beacon.BeaconMetrics, metric)

	appState.UpdateBeacon(id, beacon)
}

+ 96
- 0
internal/pkg/location/filter.go Просмотреть файл

@@ -0,0 +1,96 @@
package location

import (
"context"
"encoding/json"
"log/slog"
"time"

"github.com/AFASystems/presence/internal/pkg/common/appcontext"
"github.com/AFASystems/presence/internal/pkg/model"
"github.com/segmentio/kafka-go"
)

// Score weights and sentinel values for the location algorithm.
const (
	// SeenWeight is the flat score every metric contributes to its reporting
	// location, so locations that report more often score higher.
	SeenWeight = 1.5
	// RSSIWeight scales the signal-strength bonus: full bonus at 0 dBm,
	// zero bonus at -100 dBm (see the 1 - RSSI/-100 term in GetLikelyLocations).
	RSSIWeight = 0.75
	// DefaultDistance and DefaultLastSeen are placeholder values used in an
	// HTTPLocation before real metrics are filled in.
	DefaultDistance = 999
	DefaultLastSeen = 999
)

// LocationWriter writes location events (e.g. to Kafka). Its single method
// matches (*kafka.Writer).WriteMessages, so the real writer satisfies it and
// tests can inject a mock.
type LocationWriter interface {
	WriteMessages(ctx context.Context, msgs ...kafka.Message) error
}

// GetLikelyLocations runs the filter algorithm over every known beacon: it
// scores each reporting location by seen count and RSSI, tracks location
// confidence in app state, and writes one HTTPLocation message per live
// beacon to the writer.
//
// Scoring: each metric adds SeenWeight plus an RSSI bonus scaled by
// RSSIWeight (full bonus at 0 dBm, none at -100 dBm); the location with the
// highest total wins. Beacons with no metrics, or whose newest reading is
// older than settings.LastSeenThreshold, are skipped. Marshal/write failures
// are logged and processing continues with the next beacon.
//
// Changes vs. the previous version: time.Now() is read once outside the loop
// (one consistent timestamp per scan), and the redundant int64() conversion
// is gone — time.Time.Unix already returns int64.
func GetLikelyLocations(appState *appcontext.AppState, writer LocationWriter) {
	ctx := context.Background()
	beacons := appState.GetAllBeacons()
	settings := appState.GetSettingsValue()
	now := time.Now().Unix()

	for _, beacon := range beacons {
		r := model.HTTPLocation{
			Method:   "Standard",
			Distance: DefaultDistance,
			ID:       beacon.ID,
			Location: "",
			LastSeen: DefaultLastSeen,
		}

		mSize := len(beacon.BeaconMetrics)
		if mSize == 0 {
			continue
		}

		// Liveness gate: newest metric must be within the configured threshold.
		if now-beacon.BeaconMetrics[mSize-1].Timestamp > settings.LastSeenThreshold {
			slog.Warn("beacon is too old", "id", beacon.ID)
			continue
		}

		// Accumulate a score per reporting location across the window.
		locList := make(map[string]float64)
		for _, metric := range beacon.BeaconMetrics {
			res := SeenWeight + (RSSIWeight * (1.0 - (float64(metric.RSSI) / -100.0)))
			locList[metric.Location] += res
		}

		// Pick the highest-scoring location. NOTE(review): ties resolve by map
		// iteration order, which is random — confirm ties are unlikely/benign.
		bestLocName := ""
		maxScore := 0.0
		for locName, score := range locList {
			if score > maxScore {
				maxScore = score
				bestLocName = locName
			}
		}

		// Confidence grows while the same location keeps winning; any change
		// resets it.
		if bestLocName == beacon.PreviousLocation {
			beacon.LocationConfidence++
		} else {
			beacon.LocationConfidence = 0
		}

		r.Distance = beacon.BeaconMetrics[mSize-1].Distance
		r.Location = bestLocName
		r.LastSeen = beacon.BeaconMetrics[mSize-1].Timestamp
		r.RSSI = beacon.BeaconMetrics[mSize-1].RSSI

		// NOTE(review): reaching the confidence threshold for a location other
		// than the previously confirmed one resets the counter — verify intent.
		if beacon.LocationConfidence == settings.LocationConfidence && beacon.PreviousConfidentLocation != bestLocName {
			beacon.LocationConfidence = 0
		}

		beacon.PreviousLocation = bestLocName
		appState.UpdateBeacon(beacon.ID, beacon)

		js, err := json.Marshal(r)
		if err != nil {
			slog.Error("marshaling location", "err", err, "beacon_id", beacon.ID)
			continue
		}

		if err := writer.WriteMessages(ctx, kafka.Message{Value: js}); err != nil {
			slog.Error("sending kafka location message", "err", err, "beacon_id", beacon.ID)
		}
	}
}

+ 41
- 0
internal/pkg/location/inference.go Просмотреть файл

@@ -0,0 +1,41 @@
package location

import (
"context"
"crypto/tls"
"net/http"

"github.com/AFASystems/presence/internal/pkg/apiclient"
"github.com/AFASystems/presence/internal/pkg/config"
"github.com/AFASystems/presence/internal/pkg/model"
)

// Inferencer returns inferred positions (e.g. from an AI/ML service).
// DefaultInferencer is the concrete implementation below; the interface lets
// callers substitute an alternative implementation (e.g. in tests).
type Inferencer interface {
	Infer(ctx context.Context, cfg *config.Config) (model.PositionResponse, error)
}

// DefaultInferencer resolves positions through the apiclient package: it
// first authenticates, then queries the inference API with its HTTP client.
type DefaultInferencer struct {
	Client *http.Client
}

// NewDefaultInferencer builds an inferencer. When skipTLSVerify is true the
// transport accepts any server certificate (self-signed/dev deployments,
// mirroring config.TLSInsecureSkipVerify).
func NewDefaultInferencer(skipTLSVerify bool) *DefaultInferencer {
	transport := new(http.Transport)
	if skipTLSVerify {
		transport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
	}
	client := &http.Client{Transport: transport}
	return &DefaultInferencer{Client: client}
}

// Infer fetches an auth token via apiclient.GetToken and then requests the
// inferred positions; a failed token fetch short-circuits with a zero response.
func (d *DefaultInferencer) Infer(ctx context.Context, cfg *config.Config) (model.PositionResponse, error) {
	token, err := apiclient.GetToken(ctx, cfg, d.Client)
	if err != nil {
		return model.PositionResponse{}, err
	}
	return apiclient.InferPosition(token, d.Client, cfg)
}

+ 7
- 5
internal/pkg/logger/logger.go Просмотреть файл

@@ -2,18 +2,20 @@ package logger

import (
"io"
"log"
"log/slog"
"os"
)

func CreateLogger(fname string) *slog.Logger {
// CreateLogger creates a logger writing to both stderr and the given file.
// If the file cannot be opened, returns a logger that writes only to stderr and a no-op cleanup.
// Callers can check whether logging to file is active if needed.
func CreateLogger(fname string) (*slog.Logger, func()) {
f, err := os.OpenFile(fname, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
if err != nil {
log.Fatalf("Failed to open log file: %v\n", err)
return slog.New(slog.NewJSONHandler(os.Stderr, nil)), func() {}
}
// shell and log file multiwriter
w := io.MultiWriter(os.Stderr, f)
logger := slog.New(slog.NewJSONHandler(w, nil))
return logger
cleanup := func() { f.Close() }
return logger, cleanup
}

+ 3
- 0
internal/pkg/model/parser.go Просмотреть файл

@@ -4,6 +4,7 @@ import (
"bytes"
"encoding/binary"
"fmt"
"log/slog"
"sync"
)

@@ -55,6 +56,8 @@ func (p *ParserRegistry) Register(name string, c Config) {
b := BeaconParser{
CanParse: func(ad []byte) bool {
if len(ad) < 2 {
msg := "Beacon advertisement is too short"
slog.Error(msg)
return false
}
return len(ad) >= c.Min && len(ad) <= c.Max && bytes.HasPrefix(ad[1:], c.GetPatternBytes())


+ 13
- 0
internal/pkg/model/position.go Просмотреть файл

@@ -0,0 +1,13 @@
package model

// PositionResponse is the envelope returned by the position inference API.
type PositionResponse struct {
	Count int        `json:"count"` // number of entries in Items
	Items []Position `json:"items"`
}

// Position is one inferred 3-D coordinate for a device, keyed by its MAC
// address. NOTE(review): field `Mac` breaks Go's initialism convention (MAC),
// but renaming would change the exported API, so it is left as-is.
type Position struct {
	Mac string  `json:"mac"`
	X   float32 `json:"x"`
	Y   float32 `json:"y"`
	Z   float32 `json:"z"`
}

+ 2
- 2
internal/pkg/model/trackers.go Просмотреть файл

@@ -14,7 +14,7 @@ type Tracker struct {
Building string `json:"building"`
Location string `json:"location"`
Distance float64 `json:"distance"`
Battery uint32 `json:"battery"`
Battery uint32 `json:"battery,string"`
BatteryThreshold uint32 `json:"batteryThreshold"`
Temperature uint16 `json:"temperature"`
Temperature uint16 `json:"temperature,string"`
}

+ 5
- 5
internal/pkg/model/types.go Просмотреть файл

@@ -104,11 +104,6 @@ type HTTPResult struct {
PreviousConfidentLocation string `json:"previous_confident_location"`
}

type HTTPResultList struct {
Results map[string]HTTPResult
Lock sync.RWMutex
}

// BeaconsList holds all known beacons and their synchronization lock.
type BeaconsList struct {
Beacons map[string]Beacon `json:"beacons"`
@@ -120,6 +115,11 @@ type BeaconEventList struct {
Lock sync.RWMutex
}

type BeaconsLookup struct {
Lookup map[string]string
Lock sync.RWMutex
}

// RawReading represents an incoming raw sensor reading.
type RawReading struct {
Timestamp string `json:"timestamp"`


+ 24
- 7
internal/pkg/service/beacon_service.go Просмотреть файл

@@ -4,6 +4,7 @@ import (
"context"
"encoding/json"
"fmt"
"log/slog"
"slices"
"strings"
"time"
@@ -13,19 +14,29 @@ import (
"gorm.io/gorm"
)

func LocationToBeaconService(msg model.HTTPLocation, db *gorm.DB, writer *kafka.Writer, ctx context.Context) {
// KafkaWriter defines the interface for writing Kafka messages (allows mocking in tests)
type KafkaWriter interface {
WriteMessages(ctx context.Context, msgs ...kafka.Message) error
}

func LocationToBeaconService(msg model.HTTPLocation, db *gorm.DB, writer KafkaWriter, ctx context.Context) {
if msg.ID == "" {
fmt.Println("empty ID")
msg := "empty ID"
slog.Error(msg)
return
}

var zones []model.TrackerZones
if err := db.Select("zoneList").Where("tracker = ?", msg.ID).Find(&zones).Error; err != nil {
msg := fmt.Sprintf("Error in selecting zones: %v", err)
slog.Error(msg)
return
}

var tracker model.Tracker
if err := db.Where("id = ?", msg.ID).Find(&tracker).Error; err != nil {
msg := fmt.Sprintf("Error in selecting tracker: %v", err)
slog.Error(msg)
return
}

@@ -37,9 +48,10 @@ func LocationToBeaconService(msg model.HTTPLocation, db *gorm.DB, writer *kafka.
var gw model.Gateway
mac := formatMac(msg.Location)
if err := db.Select("id").Where("mac = ?", mac).First(&gw).Error; err != nil {
fmt.Printf("Gateway not found for MAC: %s\n", mac)
msg := fmt.Sprintf("Gateway not found for MAC: %s", mac)
slog.Error(msg)
return
}
}

if len(allowedZones) != 0 && !slices.Contains(allowedZones, gw.ID) {
alert := model.Alert{
@@ -50,23 +62,28 @@ func LocationToBeaconService(msg model.HTTPLocation, db *gorm.DB, writer *kafka.

eMsg, err := json.Marshal(alert)
if err != nil {
fmt.Println("Error in marshaling")
msg := "Error in marshaling"
slog.Error(msg)
return
} else {
msg := kafka.Message{
Value: eMsg,
}
writer.WriteMessages(ctx, msg)
return
}
}

// status, subject, subject name?
if err := db.Create(&model.Tracks{UUID: msg.ID, Timestamp: time.Now(), Gateway: gw.ID, GatewayMac: gw.MAC, Tracker: msg.ID, Floor: gw.Floor, Building: gw.Building, TrackerMac: tracker.MAC, Signal: msg.RSSI}).Error; err != nil {
fmt.Println("Error in saving distance for beacon: ", err)
msg := fmt.Sprintf("Error in saving distance for beacon: %v", err)
slog.Error(msg)
return
}

if err := db.Updates(&model.Tracker{ID: msg.ID, Location: gw.ID, Distance: msg.Distance, X: gw.X, Y: gw.Y}).Error; err != nil {
fmt.Println("Error in saving distance for beacon: ", err)
msg := fmt.Sprintf("Error in saving distance for beacon: %v", err)
slog.Error(msg)
return
}
}


+ 4
- 3
internal/pkg/service/parser_service.go Просмотреть файл

@@ -8,7 +8,7 @@ import (
"github.com/segmentio/kafka-go"
)

func SendParserConfig(kp model.KafkaParser, writer *kafka.Writer, ctx context.Context) error {
func SendParserConfig(kp model.KafkaParser, writer KafkaWriter, ctx context.Context) error {
eMsg, err := json.Marshal(kp)
if err != nil {
return err
@@ -17,7 +17,8 @@ func SendParserConfig(kp model.KafkaParser, writer *kafka.Writer, ctx context.Co
Value: eMsg,
}

writer.WriteMessages(ctx, msg)

if err := writer.WriteMessages(ctx, msg); err != nil {
return err
}
return nil
}

+ 0
- 40
internal/structure.md Просмотреть файл

@@ -1,40 +0,0 @@
internal/
├── pkg/
│ ├── model/ # All data types, structs, constants
│ │ ├── beacons.go
│ │ ├── settings.go
│ │ ├── context.go # AppContext with locks and maps
│ │ └── types.go
│ │
│ ├── httpserver/ # HTTP + WebSocket handlers
│ │ ├── routes.go # Registers all endpoints
│ │ ├── handlers.go # Core REST handlers
│ │ ├── websocket.go # WS logic (connections, broadcast)
│ │ └── server.go # StartHTTPServer()
│ │
│ ├── mqtt/ # MQTT-specific logic
│ │ ├── processor.go # IncomingMQTTProcessor + helpers
│ │ ├── publisher.go # sendHARoomMessage, sendButtonMessage
│ │ └── filters.go # incomingBeaconFilter, distance helpers
│ │
│ ├── persistence/ # BoltDB helpers
│ │ ├── load.go # LoadState, SaveState
│ │ ├── buckets.go # createBucketIfNotExists
│ │ └── persist_beacons.go
│ │
│ ├── utils/ # Small utility helpers (time, logging, etc.)
│ │ ├── time.go
│ │ ├── logging.go
│ │ └── shell.go
│ │
│ └── config/ # Default values, env vars, flags
│ └── config.go
└── test/
├── httpserver_test/
│ └── beacons_test.go
├── mqtt_test/
│ └── processor_test.go
└── persistence_test/
└── load_test.go

+ 46
- 7
scripts/README.md Просмотреть файл

@@ -1,11 +1,50 @@
# `/scripts`
# Scripts

Scripts to perform various build, install, analysis, etc operations.
Organized by concern. Default server URL is `http://localhost:1902`; override with `BASE_URL`.

These scripts keep the root level Makefile small and simple.
## Layout

Examples:
| Directory | Purpose |
|-----------|--------|
| **api/** | Server API tests and examples |
| **config/** | Server/config operations (settings, parser configs) |
| **auth/** | Auth token (for remote/protected APIs) |
| **seed/** | Dev seed data (e.g. trackers) |

* https://github.com/kubernetes/helm/tree/master/scripts
* https://github.com/cockroachdb/cockroach/tree/master/scripts
* https://github.com/hashicorp/terraform/tree/master/scripts
## API (`api/`)

- **smoke_test.sh** – Full smoke test: gateways, zones, trackerzones, trackers (list, update, delete). Requires `jq`.
```bash
./scripts/api/smoke_test.sh
BASE_URL=http://host:1902 ./scripts/api/smoke_test.sh
```
- **tracks.sh** – Tracks query examples (getTracks with limit, from, to). Optional first arg: tracker UUID.
```bash
./scripts/api/tracks.sh
./scripts/api/tracks.sh <tracker-uuid>
```

## Config (`config/`)

- **settings.sh** – PATCH `/reslevis/settings` (algorithm, thresholds, etc.).
- **add_parser.sh** – POST `/configs/beacons` to add a decoder/parser config (e.g. Eddystone).

## Auth (`auth/`)

- **token.sh** – Get OAuth token from auth server. Set env: `CLIENT_SECRET`, `USERNAME`, `PASSWORD`; optional `AUTH_URL`, `CLIENT_ID`, `AUDIENCE`. Prints token to stdout.
```bash
export CLIENT_SECRET=... USERNAME=... PASSWORD=...
TOKEN=$(./scripts/auth/token.sh)
curl -H "Authorization: Bearer $TOKEN" "$BASE_URL/reslevis/getTrackers"
```

## Seed (`seed/`)

- **seed_trackers.sh** – POST multiple trackers for dev (same payloads as former bulk seed).
```bash
./scripts/seed/seed_trackers.sh
```

## Shared

- **_common.sh** – Sourced by other scripts; sets `BASE_URL` (default `http://localhost:1902`). Do not run directly.

+ 2
- 0
scripts/_common.sh Просмотреть файл

@@ -0,0 +1,2 @@
# Shared defaults for API scripts. Source with: . "$(dirname "$0")/_common.sh"
BASE_URL="${BASE_URL:-http://localhost:1902}"

+ 0
- 16
scripts/adddecoder.sh Просмотреть файл

@@ -1,16 +0,0 @@
#!/bin/bash

SERVER_URL="http://localhost:1902"

curl -X POST "${SERVER_URL}/configs/beacons" \
-H "Content-Type: application/json" \
-d '{
"name": "Eddystone",
"min": 4,
"max": 255,
"pattern": ["0x16", "0xAA", "0xFE", "0x20"],
"configs": {
"battery": {"offset": 6, "length": 2, "order": "bigendian"},
"temperature": {"offset": 8, "length": 2, "order": "fixedpoint"}
}
}'

+ 0
- 246
scripts/api.sh Просмотреть файл

@@ -1,246 +0,0 @@
BASE_URL="http://localhost:1902"

echo "=========================================="
echo "GATEWAY API TESTS"
echo "=========================================="

echo "1. Listing all Gateways"
LIST=$(curl -s -X GET "$BASE_URL/reslevis/getGateways" | jq -c '.[]')
GATEWAY_IDS=()

IFS=$'\n'
for r in $LIST
do
echo "$r"
GATEWAY_IDS+=($(echo "$r" | jq -r '.id'))
done

sleep 1

if [ ${#GATEWAY_IDS[@]} -gt 1 ]; then
echo -e "\n\n2. Updating Gateway ${GATEWAY_IDS[1]}"
curl -X PUT "$BASE_URL/reslevis/updateGateway/${GATEWAY_IDS[1]}" \
-H "Content-Type: application/json" \
-d "{
\"id\": \"${GATEWAY_IDS[1]}\",
\"name\": \"GU-100-Updated\",
\"mac\": \"AA:BB:CC:DD:EE:FF\",
\"status\": \"online\",
\"model\": \"MG3\",
\"ip\": \"127.0.0.1\",
\"position\": \"unknown\",
\"x\": 1,
\"y\": 1,
\"notes\": \"some description\",
\"floor\": \"second\",
\"building\": \"hospital\"
}"

sleep 1

echo -e "\n\n3. Listing Gateways after update"
LIST=$(curl -s -X GET "$BASE_URL/reslevis/getGateways" | jq -c '.[]')

IFS=$'\n'
for r in $LIST
do
echo "$r"
done

sleep 1

echo -e "\n\n4. Deleting Gateway ${GATEWAY_IDS[1]}"
curl -X DELETE "$BASE_URL/reslevis/removeGateway/${GATEWAY_IDS[1]}"

sleep 1

echo -e "\n\n5. Verifying Delete (List again)..."
LIST=$(curl -s -X GET "$BASE_URL/reslevis/getGateways" | jq -c '.[]')

IFS=$'\n'
for r in $LIST
do
echo "$r"
done
else
echo "Not enough gateways to test update/delete"
fi


echo -e "\n\n=========================================="
echo "ZONE API TESTS"
echo "=========================================="

echo "6. Listing all Zones"
LIST=$(curl -s -X GET "$BASE_URL/reslevis/getZones" | jq -c '.[]')
ZONE_IDS=()

IFS=$'\n'
for r in $LIST
do
echo "$r"
ZONE_IDS+=($(echo "$r" | jq -r '.id'))
done

sleep 1

if [ ${#ZONE_IDS[@]} -gt 0 ]; then
echo -e "\n\n7. Updating Zone ${ZONE_IDS[0]}"
curl -X PUT "$BASE_URL/reslevis/updateZone" \
-H "Content-Type: application/json" \
-d "{
\"id\": \"${ZONE_IDS[0]}\",
\"name\": \"Zone-Updated\",
\"groups\": [\"security\", \"logistics\"]
}"

sleep 1

echo -e "\n\n8. Listing Zones after update"
LIST=$(curl -s -X GET "$BASE_URL/reslevis/getZones" | jq -c '.[]')

IFS=$'\n'
for r in $LIST
do
echo "$r"
done

sleep 1

echo -e "\n\n9. Deleting Zone ${ZONE_IDS[0]}"
curl -X DELETE "$BASE_URL/reslevis/removeZone/${ZONE_IDS[0]}"

sleep 1

echo -e "\n\n10. Verifying Delete (List again)..."
LIST=$(curl -s -X GET "$BASE_URL/reslevis/getZones" | jq -c '.[]')

IFS=$'\n'
for r in $LIST
do
echo "$r"
done
else
echo "No zones to test update/delete"
fi


echo -e "\n\n=========================================="
echo "TRACKERZONE API TESTS"
echo "=========================================="

echo "11. Listing all TrackerZones"
LIST=$(curl -s -X GET "$BASE_URL/reslevis/getTrackerZones" | jq -c '.[]')
TRACKERZONE_IDS=()

IFS=$'\n'
for r in $LIST
do
echo "$r"
TRACKERZONE_IDS+=($(echo "$r" | jq -r '.id'))
done

sleep 1

if [ ${#TRACKERZONE_IDS[@]} -gt 0 ]; then
echo -e "\n\n12. Updating TrackerZone ${TRACKERZONE_IDS[0]}"
curl -X PUT "$BASE_URL/reslevis/updateTrackerZone" \
-H "Content-Type: application/json" \
-d "{
\"id\": \"${TRACKERZONE_IDS[0]}\",
\"name\": \"TrackerZone-Updated\"
}"

sleep 1

echo -e "\n\n13. Listing TrackerZones after update"
LIST=$(curl -s -X GET "$BASE_URL/reslevis/getTrackerZones" | jq -c '.[]')

IFS=$'\n'
for r in $LIST
do
echo "$r"
done

sleep 1

echo -e "\n\n14. Deleting TrackerZone ${TRACKERZONE_IDS[0]}"
curl -X DELETE "$BASE_URL/reslevis/removeTrackerZone/${TRACKERZONE_IDS[0]}"

sleep 1

echo -e "\n\n15. Verifying Delete (List again)..."
LIST=$(curl -s -X GET "$BASE_URL/reslevis/getTrackerZones" | jq -c '.[]')

IFS=$'\n'
for r in $LIST
do
echo "$r"
done
else
echo "No trackerzones to test update/delete"
fi


echo -e "\n\n=========================================="
echo "TRACKER API TESTS"
echo "=========================================="

echo "16. Listing all Trackers"
LIST=$(curl -s -X GET "$BASE_URL/reslevis/getTrackers" | jq -c '.[]')
TRACKER_IDS=()

IFS=$'\n'
for r in $LIST
do
echo "$r"
TRACKER_IDS+=($(echo "$r" | jq -r '.id'))
done

sleep 1

if [ ${#TRACKER_IDS[@]} -gt 0 ]; then
echo -e "\n\n17. Updating Tracker ${TRACKER_IDS[0]}"
curl -X PUT "$BASE_URL/reslevis/updateTracker" \
-H "Content-Type: application/json" \
-d "{
\"id\": \"${TRACKER_IDS[0]}\",
\"name\": \"Tracker-Updated\",
\"battery\": 85,
\"status\": \"inactive\"
}"

sleep 1

echo -e "\n\n18. Listing Trackers after update"
LIST=$(curl -s -X GET "$BASE_URL/reslevis/getTrackers" | jq -c '.[]')

IFS=$'\n'
for r in $LIST
do
echo "$r"
done

sleep 1

echo -e "\n\n19. Deleting Tracker ${TRACKER_IDS[0]}"
curl -X DELETE "$BASE_URL/reslevis/removeTracker/${TRACKER_IDS[0]}"

sleep 1

echo -e "\n\n20. Verifying Delete (List again)..."
LIST=$(curl -s -X GET "$BASE_URL/reslevis/getTrackers" | jq -c '.[]')

IFS=$'\n'
for r in $LIST
do
echo "$r"
done
else
echo "No trackers to test update/delete"
fi


echo -e "\n\n=========================================="
echo "ALL TESTS COMPLETED"
echo "=========================================="

+ 129
- 0
scripts/api/smoke_test.sh Просмотреть файл

@@ -0,0 +1,129 @@
#!/bin/bash
# Full API smoke test: gateways, zones, trackerzones, trackers (list/update/delete).
# Usage: ./api/smoke_test.sh or BASE_URL=http://host:port ./api/smoke_test.sh
set -e
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
. "${SCRIPT_DIR}/../_common.sh"

# list_rows ENDPOINT -- emit each object of a JSON array response, one per line.
# Centralizes the repeated "curl | jq -c '.[]'" pattern used by every section.
list_rows() {
    curl -s -X GET "$BASE_URL/$1" | jq -c '.[]'
}

echo "=========================================="
echo "GATEWAY API TESTS"
echo "=========================================="

echo "1. Listing all Gateways"
GATEWAY_IDS=()
# Read rows line-by-line; the original mutated the global IFS and relied on it
# leaking into later sections' word-splitting loops.
while IFS= read -r row; do
    echo "$row"
    GATEWAY_IDS+=("$(echo "$row" | jq -r '.id')")
done < <(list_rows "reslevis/getGateways")
sleep 1

if [ "${#GATEWAY_IDS[@]}" -gt 1 ]; then
    echo -e "\n\n2. Updating Gateway ${GATEWAY_IDS[1]}"
    curl -s -X PUT "$BASE_URL/reslevis/updateGateway/${GATEWAY_IDS[1]}" \
        -H "Content-Type: application/json" \
        -d "{\"id\": \"${GATEWAY_IDS[1]}\", \"name\": \"GU-100-Updated\", \"mac\": \"AA:BB:CC:DD:EE:FF\", \"status\": \"online\", \"model\": \"MG3\", \"ip\": \"127.0.0.1\", \"position\": \"unknown\", \"x\": 1, \"y\": 1, \"notes\": \"some description\", \"floor\": \"second\", \"building\": \"hospital\"}"
    sleep 1
    echo -e "\n\n3. Listing Gateways after update"
    list_rows "reslevis/getGateways"
    sleep 1
    echo -e "\n\n4. Deleting Gateway ${GATEWAY_IDS[1]}"
    curl -s -X DELETE "$BASE_URL/reslevis/removeGateway/${GATEWAY_IDS[1]}"
    sleep 1
    echo -e "\n\n5. Verifying Delete (List again)..."
    list_rows "reslevis/getGateways"
else
    echo "Not enough gateways to test update/delete"
fi

echo -e "\n\n=========================================="
echo "ZONE API TESTS"
echo "=========================================="
echo "6. Listing all Zones"
ZONE_IDS=()
while IFS= read -r row; do
    echo "$row"
    ZONE_IDS+=("$(echo "$row" | jq -r '.id')")
done < <(list_rows "reslevis/getZones")
sleep 1

if [ "${#ZONE_IDS[@]}" -gt 0 ]; then
    echo -e "\n\n7. Updating Zone ${ZONE_IDS[0]}"
    curl -s -X PUT "$BASE_URL/reslevis/updateZone" -H "Content-Type: application/json" \
        -d "{\"id\": \"${ZONE_IDS[0]}\", \"name\": \"Zone-Updated\", \"groups\": [\"security\", \"logistics\"]}"
    sleep 1
    echo -e "\n\n8. Listing Zones after update"
    list_rows "reslevis/getZones"
    sleep 1
    echo -e "\n\n9. Deleting Zone ${ZONE_IDS[0]}"
    curl -s -X DELETE "$BASE_URL/reslevis/removeZone/${ZONE_IDS[0]}"
    sleep 1
    echo -e "\n\n10. Verifying Delete..."
    list_rows "reslevis/getZones"
else
    echo "No zones to test update/delete"
fi

echo -e "\n\n=========================================="
echo "TRACKERZONE API TESTS"
echo "=========================================="
echo "11. Listing all TrackerZones"
TRACKERZONE_IDS=()
while IFS= read -r row; do
    echo "$row"
    TRACKERZONE_IDS+=("$(echo "$row" | jq -r '.id')")
done < <(list_rows "reslevis/getTrackerZones")
sleep 1

if [ "${#TRACKERZONE_IDS[@]}" -gt 0 ]; then
    echo -e "\n\n12. Updating TrackerZone ${TRACKERZONE_IDS[0]}"
    curl -s -X PUT "$BASE_URL/reslevis/updateTrackerZone" -H "Content-Type: application/json" \
        -d "{\"id\": \"${TRACKERZONE_IDS[0]}\", \"name\": \"TrackerZone-Updated\"}"
    sleep 1
    echo -e "\n\n13. Listing TrackerZones after update"
    list_rows "reslevis/getTrackerZones"
    sleep 1
    echo -e "\n\n14. Deleting TrackerZone ${TRACKERZONE_IDS[0]}"
    curl -s -X DELETE "$BASE_URL/reslevis/removeTrackerZone/${TRACKERZONE_IDS[0]}"
    sleep 1
    echo -e "\n\n15. Verifying Delete..."
    list_rows "reslevis/getTrackerZones"
else
    echo "No trackerzones to test update/delete"
fi

echo -e "\n\n=========================================="
echo "TRACKER API TESTS"
echo "=========================================="
echo "16. Listing all Trackers"
TRACKER_IDS=()
while IFS= read -r row; do
    echo "$row"
    TRACKER_IDS+=("$(echo "$row" | jq -r '.id')")
done < <(list_rows "reslevis/getTrackers")
sleep 1

if [ "${#TRACKER_IDS[@]}" -gt 0 ]; then
    echo -e "\n\n17. Updating Tracker ${TRACKER_IDS[0]}"
    curl -s -X PUT "$BASE_URL/reslevis/updateTracker" -H "Content-Type: application/json" \
        -d "{\"id\": \"${TRACKER_IDS[0]}\", \"name\": \"Tracker-Updated\", \"battery\": 85, \"status\": \"inactive\"}"
    sleep 1
    echo -e "\n\n18. Listing Trackers after update"
    list_rows "reslevis/getTrackers"
    sleep 1
    echo -e "\n\n19. Deleting Tracker ${TRACKER_IDS[0]}"
    curl -s -X DELETE "$BASE_URL/reslevis/removeTracker/${TRACKER_IDS[0]}"
    sleep 1
    echo -e "\n\n20. Verifying Delete..."
    list_rows "reslevis/getTrackers"
else
    echo "No trackers to test update/delete"
fi

echo -e "\n\n=========================================="
echo "ALL TESTS COMPLETED"
echo "=========================================="

+ 33
- 0
scripts/api/tracks.sh Просмотреть файл

@@ -0,0 +1,33 @@
#!/bin/bash
# Tracks API query examples (getTracks with limit, from, to).
# Usage: ./api/tracks.sh [TRACKER_UUID]
set -e
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
. "${SCRIPT_DIR}/../_common.sh"

TRACKER_UUID="${1:-1a6c6f1e-9a3d-4a66-9f0b-6d5f0e1c1a01}"

# fetch_tracks URL -- GET the URL and pretty-print the JSON response.
fetch_tracks() {
    curl -s -X GET "$1" | jq '.'
}

echo "==================================="
echo "Tracks API Query Examples"
echo "==================================="
echo ""

echo "1. Basic query (default: last 10 tracks from last 24 hours):"
echo "GET /reslevis/getTracks/${TRACKER_UUID}"
fetch_tracks "${BASE_URL}/reslevis/getTracks/${TRACKER_UUID}"
echo -e "\n"

echo "2. Get last 50 tracks:"
fetch_tracks "${BASE_URL}/reslevis/getTracks/${TRACKER_UUID}?limit=50"
echo -e "\n"

echo "3. Get tracks with date range (from/to in RFC3339):"
TO_DATE=$(date -u +%Y-%m-%dT%H:%M:%SZ)
# GNU date first, BSD/macOS date second, fixed epoch as a last resort.
FROM_DATE=$(date -u -d '7 days ago' +%Y-%m-%dT%H:%M:%SZ 2>/dev/null || date -u -v-7d +%Y-%m-%dT%H:%M:%SZ 2>/dev/null || echo "2020-01-01T00:00:00Z")
fetch_tracks "${BASE_URL}/reslevis/getTracks/${TRACKER_UUID}?from=${FROM_DATE}&to=${TO_DATE}&limit=20"
echo -e "\n"

echo "==================================="
echo "Query Parameters: limit, from (RFC3339), to (RFC3339)"
echo "Get tracker UUIDs from: GET /reslevis/getTrackers"
echo "==================================="

+ 21
- 0
scripts/auth/token.sh Просмотреть файл

@@ -0,0 +1,21 @@
#!/bin/bash
# Get OAuth token from auth server. Set AUTH_URL and form data for your environment.
# Usage: ./auth/token.sh (prints token to stdout)
# Example with token: TOKEN=$(./scripts/auth/token.sh) && curl -H "Authorization: Bearer $TOKEN" "$BASE_URL/reslevis/getTrackers"
AUTH_URL="${AUTH_URL:-https://10.251.0.30:10002/realms/API.Server.local/protocol/openid-connect/token}"

# Fail fast with a clear message instead of sending a request that can only fail.
for var in CLIENT_SECRET USERNAME PASSWORD; do
    if [ -z "${!var}" ]; then
        echo "Failed to get token. Set CLIENT_SECRET, USERNAME, PASSWORD (and optionally AUTH_URL, CLIENT_ID, AUDIENCE)." >&2
        exit 1
    fi
done

# NOTE(review): -k disables TLS certificate verification; acceptable only for
# the self-signed dev realm this script targets.
TOKEN=$(curl -k -s -X POST "$AUTH_URL" \
    -H "Content-Type: application/x-www-form-urlencoded" \
    -d "grant_type=password" \
    -d "client_id=${CLIENT_ID:-Fastapi}" \
    -d "client_secret=${CLIENT_SECRET}" \
    -d "username=${USERNAME}" \
    -d "password=${PASSWORD}" \
    -d "audience=${AUDIENCE:-Fastapi}" \
    | jq -r '.access_token')

# jq prints "null" when the key is absent (e.g. bad credentials), so check both.
if [ -z "$TOKEN" ] || [ "$TOKEN" = "null" ]; then
    echo "Failed to get token. Set CLIENT_SECRET, USERNAME, PASSWORD (and optionally AUTH_URL, CLIENT_ID, AUDIENCE)." >&2
    exit 1
fi
echo "$TOKEN"

+ 19
- 0
scripts/config/add_parser.sh Просмотреть файл

@@ -0,0 +1,19 @@
#!/bin/bash
# Add a decoder/parser config (POST /configs/beacons).
# Usage: ./config/add_parser.sh or BASE_URL=http://host:port ./config/add_parser.sh
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
. "${SCRIPT_DIR}/../_common.sh"

# Example Eddystone parser definition; kept in a variable so the request line
# below stays short and the payload is easy to tweak.
PAYLOAD='{
"name": "Eddystone",
"min": 4,
"max": 255,
"pattern": ["0x16", "0xAA", "0xFE", "0x20"],
"configs": {
"battery": {"offset": 6, "length": 2, "order": "bigendian"},
"temperature": {"offset": 8, "length": 2, "order": "fixedpoint"}
}
}'

curl -s -X POST "${BASE_URL}/configs/beacons" \
    -H "Content-Type: application/json" \
    -d "$PAYLOAD"
echo ""

+ 18
- 0
scripts/config/settings.sh Просмотреть файл

@@ -0,0 +1,18 @@
#!/bin/bash
# PATCH server settings (algorithm, thresholds, etc.).
# Usage: ./config/settings.sh or BASE_URL=http://host:port ./config/settings.sh
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
. "${SCRIPT_DIR}/../_common.sh"

# Settings payload kept in a variable for readability; values mirror the
# server defaults used during development.
PAYLOAD='{
"current_algorithm": "filter",
"last_seen_threshold": 310,
"beacon_metric_size": 100,
"HA_send_interval": 60,
"HA_send_changes_only": true,
"RSSI_enforce_threshold": false,
"RSSI_min_threshold": -80
}'

curl -s -X PATCH "${BASE_URL}/reslevis/settings" \
    -H "Content-Type: application/json" \
    -d "$PAYLOAD"
echo ""
echo ""

+ 0
- 46
scripts/gatewayApi.sh Просмотреть файл

@@ -1,46 +0,0 @@
#!/bin/bash
BASE_URL="http://localhost:1902" # Change to your port

echo "1. Adding a Gateway..."
curl -X POST "$BASE_URL/reslevis/postGateway" \
-H "Content-Type: application/json" \
-d '{
"id": "gw_01",
"name": "Front Entrance",
"mac": "AA:BB:CC:DD:EE:FF",
"status": "online",
"building": "Main HQ"
}'

sleep 1

echo -e "\n\n2. Listing Gateways..."
curl -X GET "$BASE_URL/reslevis/getGateways"

sleep 1

echo -e "\n\n2. Updating Gateway..."
curl -X PUT "$BASE_URL/reslevis/updateGateway/gw_01" \
-H "Content-Type: application/json" \
-d '{
"id": "gw_01",
"name": "Front Entrance",
"mac": "AA:BB:CC:DD:EE:FF",
"status": "online",
"building": "Pisarna HQ"
}'

sleep 1

echo -e "\n\n2. Listing Gateways..."
curl -X GET "$BASE_URL/reslevis/getGateways"

sleep 1

echo -e "\n\n3. Deleting Gateway..."
curl -X DELETE "$BASE_URL/reslevis/removeGateway/gw_01"

sleep 1

echo -e "\n\n4. Verifying Delete (List again)..."
curl -X GET "$BASE_URL/reslevis/getGateways"

+ 47
- 0
scripts/seed/seed_trackers.sh Просмотреть файл

@@ -0,0 +1,47 @@
#!/bin/bash
# Seed dev trackers via POST /reslevis/postTracker. Override BASE_URL if needed.
# Usage: ./seed/seed_trackers.sh
set -e
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
. "${SCRIPT_DIR}/../_common.sh"

# seed_tracker ID NAME MAC [MODEL] -- POST a single tracker; model defaults to B7.
seed_tracker() {
    local id="$1" name="$2" mac="$3" model="${4:-B7}"
    echo "Adding tracker $mac ($name)..."
    curl -s -X POST "$BASE_URL/reslevis/postTracker" \
        -H "Content-Type: application/json" \
        -d "{\"id\": \"$id\", \"name\": \"$name\", \"mac\": \"$mac\", \"status\": \"1\", \"model\": \"$model\", \"position\": \"\", \"notes\": \"\", \"x\": 0, \"y\": 0, \"floor\": null, \"building\": null}"
    echo ""
    sleep 1
}

# One "id|name|mac|model" record per tracker; keeps the fixture data in one table.
TRACKERS=(
    "a3c1b2e4-9f73-4c1f-8c87-52e4d9cf9a01|INGICS-TASTO|C83F8F17DB35|MNBT01G"
    "d91a7b4f-02f6-44b6-9fa0-ff6df1c2e7b3|RUSSI|C300003947DF|B7"
    "5f1a9c3d-4b6f-4f88-9c92-df5c2d37c2aa|PETRELLA|C300003B1E20|MWC01"
    "8b7d42e9-4db5-4f42-a6c1-4e9f0c3e7d12|AMOROSA-S|C300003946B5|MWB01"
    "1e93b3fd-7d67-4a53-9c7a-0f0a8e7e41c6|GALLO|C300003946AC|MWB01"
    "e2b9d6cc-7d89-46bb-9e45-2b7f71e4a4d0|SMISEK|C300003946B1|MWB01"
    "6cfdeab2-03c4-41d7-9c1d-5f7bcb8c0b6b|ROMAGNUOLO|C300003B1E21|MWC01"
    "fa73b6dd-9941-4d25-8a9a-8df3b09a9d77|BC-43|C300003947C4|B7"
    "9c55d03e-2db1-4b0a-b1ac-8b60f60e712d|AMOROSA-F|C300003947E2|B7"
    "2a00e3b4-4a12-4f70-a4c4-408a1779e251|DINONNO|C300003B1E1F|MWC01"
    "bf6d6c84-5e1a-4b83-a10f-0e9cf2a198c3|ismarch-X6|C7AE561E38B7|B7"
    "41c4c6b2-9c3d-48d6-aea6-7c1bcfdfb2b7|ismarch-C2|E01F9A7A47D2|B7"
)

echo "Seeding trackers..."
for entry in "${TRACKERS[@]}"; do
    IFS='|' read -r id name mac model <<< "$entry"
    seed_tracker "$id" "$name" "$mac" "$model"
done

echo "Listing all trackers..."
curl -s -X GET "$BASE_URL/reslevis/getTrackers" | jq '.'
echo "Done."

+ 0
- 13
scripts/settingsApi.sh Просмотреть файл

@@ -1,13 +0,0 @@
#!/bin/bash

curl -X PATCH "http://localhost:1902/reslevis/settings" \
-H "Content-Type: application/json" \
-d '{
"current_algorithm": "ai",
"last_seen_threshold": 310,
"beacon_metric_size": 100,
"HA_send_interval": 60,
"HA_send_changes_only": true,
"RSSI_enforce_threshold": false,
"RSSI_min_threshold": -80
}'

+ 0
- 19
scripts/testAPI.sh Просмотреть файл

@@ -1,19 +0,0 @@
#!/bin/bash
URL="http://127.0.0.1:1902/api/beacons"
BEACON_ID="C83F8F17DB35"

echo "POST (create)"
curl -s -X POST "http://127.0.0.1:1902/api/addbeacons" \
-H "Content-Type: application/json" \
-d '[{"id":"a3c1b2e4-9f73-4c1f-8c87-52e4d9cf9a01","name":"INGICS-TASTO","mac":"C83F8F17DB35","status":"1","model":"MNBT01G","position":"","notes":"","x":0.0,"y":0.0,"zone":null,"building":null},{"id":"d91a7b4f-02f6-44b6-9fa0-ff6df1c2e7b3","name":"RUSSI","mac":"C300003947DF","status":"1","model":"B7","position":"","notes":"","x":0.0,"y":0.0,"zone":null,"building":null},{"id":"5f1a9c3d-4b6f-4f88-9c92-df5c2d37c2aa","name":"PETRELLA","mac":"C300003B1E20","status":"1","model":"MWC01","position":"","notes":"","x":0.0,"y":0.0,"zone":null,"building":null},{"id":"8b7d42e9-4db5-4f42-a6c1-4e9f0c3e7d12","name":"AMOROSA-S","mac":"C300003946B5","status":"1","model":"MWB01","position":"","notes":"","x":0.0,"y":0.0,"zone":null,"building":null},{"id":"1e93b3fd-7d67-4a53-9c7a-0f0a8e7e41c6","name":"GALLO","mac":"C300003946AC","status":"1","model":"MWB01","position":"","notes":"","x":0.0,"y":0.0,"zone":null,"building":null},{"id":"e2b9d6cc-7d89-46bb-9e45-2b7f71e4a4d0","name":"SMISEK","mac":"C300003946B1","status":"1","model":"MWB01","position":"","notes":"","x":0.0,"y":0.0,"zone":null,"building":null},{"id":"6cfdeab2-03c4-41d7-9c1d-5f7bcb8c0b6b","name":"ROMAGNUOLO","mac":"C300003B1E21","status":"1","model":"MWC01","position":"","notes":"","x":0.0,"y":0.0,"zone":null,"building":null},{"id":"fa73b6dd-9941-4d25-8a9a-8df3b09a9d77","name":"BC-43","mac":"C300003947C4","status":"1","model":"B7","position":"","notes":"","x":0.0,"y":0.0,"zone":null,"building":null},{"id":"9c55d03e-2db1-4b0a-b1ac-8b60f60e712d","name":"AMOROSA-F","mac":"C300003947E2","status":"1","model":"B7","position":"","notes":"","x":0.0,"y":0.0,"zone":null,"building":null},{"id":"2a00e3b4-4a12-4f70-a4c4-408a1779e251","name":"DINONNO","mac":"C300003B1E1F","status":"1","model":"MWC01","position":"","notes":"","x":0.0,"y":0.0,"zone":null,"building":null},{"id":"bf6d6c84-5e1a-4b83-a10f-0e9cf2a198c3","name":"ismarch-X6","mac":"C7AE561E38B7","status":"1","model":"B7","position":"","notes":"","x":0.0,"y":0.0,"zone":null,"building":
null},{"id":"41c4c6b2-9c3d-48d6-aea6-7c1bcfdfb2b7","name":"ismarch-C2","mac":"E01F9A7A47D2","status":"1","model":"B7","position":"","notes":"","x":0.0,"y":0.0,"zone":null,"building":null}]'
echo -e "\n"

sleep 1

curl -X GET $URL

sleep 1

curl -X GET "http://127.0.0.1:1902/api/beaconids"

sleep 1

+ 0
- 248
scripts/testalltrackers.sh Просмотреть файл

@@ -1,248 +0,0 @@
#!/bin/bash
BASE_URL="http://localhost:1902"

echo "Adding all trackers individually..."

echo "1. Adding tracker C83F8F17DB35..."
curl -s -X POST "$BASE_URL/reslevis/postTracker" \
-H "Content-Type: application/json" \
-d '{
"id": "a3c1b2e4-9f73-4c1f-8c87-52e4d9cf9a01",
"name": "INGICS-TASTO",
"mac": "C83F8F17DB35",
"status": "1",
"model": "MNBT01G",
"position": "",
"notes": "",
"x": 0,
"y": 0,
"floor": null,
"building": null
}'
echo -e "\n"

sleep 1

echo "2. Adding tracker C300003947DF..."
curl -s -X POST "$BASE_URL/reslevis/postTracker" \
-H "Content-Type: application/json" \
-d '{
"id": "d91a7b4f-02f6-44b6-9fa0-ff6df1c2e7b3",
"name": "RUSSI",
"mac": "C300003947DF",
"status": "1",
"model": "B7",
"position": "",
"notes": "",
"x": 0,
"y": 0,
"floor": null,
"building": null
}'
echo -e "\n"

sleep 1

echo "3. Adding tracker C300003B1E20..."
curl -s -X POST "$BASE_URL/reslevis/postTracker" \
-H "Content-Type: application/json" \
-d '{
"id": "5f1a9c3d-4b6f-4f88-9c92-df5c2d37c2aa",
"name": "PETRELLA",
"mac": "C300003B1E20",
"status": "1",
"model": "MWC01",
"position": "",
"notes": "",
"x": 0,
"y": 0,
"floor": null,
"building": null
}'
echo -e "\n"

sleep 1

echo "4. Adding tracker C300003946B5..."
curl -s -X POST "$BASE_URL/reslevis/postTracker" \
-H "Content-Type: application/json" \
-d '{
"id": "8b7d42e9-4db5-4f42-a6c1-4e9f0c3e7d12",
"name": "AMOROSA-S",
"mac": "C300003946B5",
"status": "1",
"model": "MWB01",
"position": "",
"notes": "",
"x": 0,
"y": 0,
"floor": null,
"building": null
}'
echo -e "\n"

sleep 1

echo "5. Adding tracker C300003946AC..."
curl -s -X POST "$BASE_URL/reslevis/postTracker" \
-H "Content-Type: application/json" \
-d '{
"id": "1e93b3fd-7d67-4a53-9c7a-0f0a8e7e41c6",
"name": "GALLO",
"mac": "C300003946AC",
"status": "1",
"model": "MWB01",
"position": "",
"notes": "",
"x": 0,
"y": 0,
"floor": null,
"building": null
}'
echo -e "\n"

sleep 1

echo "6. Adding tracker C300003946B1..."
curl -s -X POST "$BASE_URL/reslevis/postTracker" \
-H "Content-Type: application/json" \
-d '{
"id": "e2b9d6cc-7d89-46bb-9e45-2b7f71e4a4d0",
"name": "SMISEK",
"mac": "C300003946B1",
"status": "1",
"model": "MWB01",
"position": "",
"notes": "",
"x": 0,
"y": 0,
"floor": null,
"building": null
}'
echo -e "\n"

sleep 1

echo "7. Adding tracker C300003B1E21..."
curl -s -X POST "$BASE_URL/reslevis/postTracker" \
-H "Content-Type: application/json" \
-d '{
"id": "6cfdeab2-03c4-41d7-9c1d-5f7bcb8c0b6b",
"name": "ROMAGNUOLO",
"mac": "C300003B1E21",
"status": "1",
"model": "MWC01",
"position": "",
"notes": "",
"x": 0,
"y": 0,
"floor": null,
"building": null
}'
echo -e "\n"

sleep 1

echo "8. Adding tracker C300003947C4..."
curl -s -X POST "$BASE_URL/reslevis/postTracker" \
-H "Content-Type: application/json" \
-d '{
"id": "fa73b6dd-9941-4d25-8a9a-8df3b09a9d77",
"name": "BC-43",
"mac": "C300003947C4",
"status": "1",
"model": "B7",
"position": "",
"notes": "",
"x": 0,
"y": 0,
"floor": null,
"building": null
}'
echo -e "\n"

sleep 1

echo "9. Adding tracker C300003947E2..."
curl -s -X POST "$BASE_URL/reslevis/postTracker" \
-H "Content-Type: application/json" \
-d '{
"id": "9c55d03e-2db1-4b0a-b1ac-8b60f60e712d",
"name": "AMOROSA-F",
"mac": "C300003947E2",
"status": "1",
"model": "B7",
"position": "",
"notes": "",
"x": 0,
"y": 0,
"floor": null,
"building": null
}'
echo -e "\n"

sleep 1

echo "10. Adding tracker C300003B1E1F..."
curl -s -X POST "$BASE_URL/reslevis/postTracker" \
-H "Content-Type: application/json" \
-d '{
"id": "2a00e3b4-4a12-4f70-a4c4-408a1779e251",
"name": "DINONNO",
"mac": "C300003B1E1F",
"status": "1",
"model": "MWC01",
"position": "",
"notes": "",
"x": 0,
"y": 0,
"floor": null,
"building": null
}'
echo -e "\n"

sleep 1

echo "11. Adding tracker C7AE561E38B7..."
curl -s -X POST "$BASE_URL/reslevis/postTracker" \
-H "Content-Type: application/json" \
-d '{
"id": "bf6d6c84-5e1a-4b83-a10f-0e9cf2a198c3",
"name": "ismarch-X6",
"mac": "C7AE561E38B7",
"status": "1",
"model": "B7",
"position": "",
"notes": "",
"x": 0,
"y": 0,
"floor": null,
"building": null
}'
echo -e "\n"

sleep 1

echo "12. Adding tracker E01F9A7A47D2..."
curl -s -X POST "$BASE_URL/reslevis/postTracker" \
-H "Content-Type: application/json" \
-d '{
"id": "41c4c6b2-9c3d-48d6-aea6-7c1bcfdfb2b7",
"name": "ismarch-C2",
"mac": "E01F9A7A47D2",
"status": "1",
"model": "B7",
"position": "",
"notes": "",
"x": 0,
"y": 0,
"floor": null,
"building": null
}'
echo -e "\n"

sleep 1

echo "All trackers added! Listing all trackers..."
curl -X GET "$BASE_URL/reslevis/getTrackers"
echo -e "\n"

+ 0
- 15
scripts/token.sh Просмотреть файл

@@ -1,15 +0,0 @@
TOKEN=$(
curl -k -s -X POST "https://10.251.0.30:10002/realms/API.Server.local/protocol/openid-connect/token" \
-H "Content-Type: application/x-www-form-urlencoded" \
-d "grant_type=password" \
-d "client_id=Fastapi" \
-d "client_secret=wojuoB7Z5xhlPFrF2lIxJSSdVHCApEgC" \
-d "username=core" \
-d "password=C0r3_us3r_Cr3d3nt14ls" \
-d "audience=Fastapi" \
| jq -r '.access_token'
)

curl -k -s -X GET "https://10.251.0.30:5050/reslevis/getTrackers" \
-H "accept: application/json" \
-H "Authorization: Bearer ${TOKEN}"

+ 0
- 58
scripts/trackerApi.sh Просмотреть файл

@@ -1,58 +0,0 @@
#!/bin/bash
BASE_URL="http://localhost:1902" # Change to your port

echo "1. Adding a Tracker..."
curl -X POST "$BASE_URL/reslevis/postTracker" \
-H "Content-Type: application/json" \
-d '{
"id": "tracker_01",
"name": "Employee Badge #001",
"mac": "11:22:33:44:55:66",
"status": "active",
"model": "BLE Beacon v2",
"position": "Office A-101",
"notes": "Primary employee tracker",
"x": 150,
"y": 200,
"floor": "550e8400-e29b-41d4-a716-446655440000",
"building": "550e8400-e29b-41d4-a716-446655440001"
}'

sleep 1

echo -e "\n\n2. Listing Trackers..."
curl -X GET "$BASE_URL/reslevis/getTrackers"

sleep 1

echo -e "\n\n3. Updating Tracker..."
curl -X PUT "$BASE_URL/reslevis/updateTracker" \
-H "Content-Type: application/json" \
-d '{
"id": "tracker_01",
"name": "Employee Badge #001 - Updated",
"mac": "11:22:33:44:55:66",
"status": "inactive",
"model": "BLE Beacon v2",
"position": "Office B-205",
"notes": "Updated position and status",
"x": 300,
"y": 400,
"floor": "550e8400-e29b-41d4-a716-446655440002",
"building": "550e8400-e29b-41d4-a716-446655440001"
}'

sleep 1

echo -e "\n\n4. Listing Trackers after update..."
curl -X GET "$BASE_URL/reslevis/getTrackers"

sleep 1

echo -e "\n\n5. Deleting Tracker..."
curl -X DELETE "$BASE_URL/reslevis/removeTracker/tracker_01"

sleep 1

echo -e "\n\n6. Verifying Delete (List again)..."
curl -X GET "$BASE_URL/reslevis/getTrackers"

+ 0
- 46
scripts/trackerzonesApi.sh Просмотреть файл

@@ -1,46 +0,0 @@
#!/bin/bash
BASE_URL="http://localhost:1902"

echo "1. Adding Tracker Zone Mapping..."
curl -X POST "$BASE_URL/reslevis/postTrackerZone" \
-H "Content-Type: application/json" \
-d '{
"id": "b6b2a2e4-58b3-4aa4-8d6a-4b55a2c5b2d2",
"zoneList": ["0c7b9c7f-6d0f-4d4e-9e4a-2c9f2b1d6a11","1d2e3f40-1111-2222-3333-444455556666"],
"tracker": "1e93b3fd-7d67-4a53-9c7a-0f0a8e7e41c6",
"days": "All,Mon,Tue,Wed,Thu,Fri",
"time": "09:00-17:00"
}'

sleep 1

echo -e "\n\n2. Listing Trackers..."
curl -X GET "$BASE_URL/reslevis/getTrackerZones"

# sleep 1

# echo "Updating Tracker Zone List and Time..."
# curl -X PUT "$BASE_URL/reslevis/updateTrackerZone" \
# -H "Content-Type: application/json" \
# -d '{
# "id": "tz_001",
# "zoneList": ["zone_C"],
# "tracker": "TAG_55",
# "days": "Sat-Sun",
# "time": "10:00-14:00"
# }'

# sleep 1

# echo -e "\n\n2. Listing Trackers..."
# curl -X GET "$BASE_URL/reslevis/getTrackerZones"

# sleep 1

# echo -e "\n\n3. Deleting Tracker Mapping..."
# curl -X DELETE "$BASE_URL/reslevis/removeTrackerZone/tz_001"

# sleep 1

# echo -e "\n\n2. Listing Trackers..."
# curl -X GET "$BASE_URL/reslevis/getTrackerZones"

+ 0
- 61
scripts/tracks.sh Просмотреть файл

@@ -1,61 +0,0 @@
#!/bin/bash

# Server URL
SERVER_URL="http://localhost:1902"
TRACKER_UUID="1a6c6f1e-9a3d-4a66-9f0b-6d5f0e1c1a01"
echo "==================================="
echo "Tracks API Query Examples"
echo "==================================="
echo ""

echo "1. Basic query (default: last 10 tracks from last 24 hours):"
echo "GET /reslevis/getTracks/${TRACKER_UUID}"
curl -s -X GET "${SERVER_URL}/reslevis/getTracks/${TRACKER_UUID}" | jq '.'
echo -e "\n"

echo "2. Get last 50 tracks:"
echo "GET /reslevis/getTracks/${TRACKER_UUID}?limit=50"
curl -s -X GET "${SERVER_URL}/reslevis/getTracks/${TRACKER_UUID}?limit=50" | jq '.'
echo -e "\n"

echo "3. Get tracks from the last 7 days (limit 20):"
FROM_DATE=$(date -u -d '7 days ago' +%Y-%m-%dT%H:%M:%SZ)
TO_DATE=$(date -u +%Y-%m-%dT%H:%M:%SZ)
echo "GET /reslevis/getTracks/${TRACKER_UUID}?from=${FROM_DATE}&to=${TO_DATE}&limit=20"
curl -s -X GET "${SERVER_URL}/reslevis/getTracks/${TRACKER_UUID}?from=${FROM_DATE}&to=${TO_DATE}&limit=20" | jq '.'
echo -e "\n"

echo "4. Get tracks from a specific date range:"
FROM_DATE="2026-01-20T00:00:00Z"
TO_DATE="2026-01-21T23:59:59Z"
echo "GET /reslevis/getTracks/${TRACKER_UUID}?from=${FROM_DATE}&to=${TO_DATE}&limit=10"
curl -s -X GET "${SERVER_URL}/reslevis/getTracks/${TRACKER_UUID}?from=${FROM_DATE}&to=${TO_DATE}&limit=100" | jq '.'
echo -e "\n"

echo "5. Get tracks from today only:"
FROM_DATE=$(date -u -d 'today 00:00:00' +%Y-%m-%dT%H:%M:%SZ)
TO_DATE=$(date -u +%Y-%m-%dT%H:%M:%SZ)
echo "GET /reslevis/getTracks/${TRACKER_UUID}?from=${FROM_DATE}&to=${TO_DATE}&limit=10"
curl -s -X GET "${SERVER_URL}/reslevis/getTracks/${TRACKER_UUID}?from=${FROM_DATE}&to=${TO_DATE}" | jq '.'
echo -e "\n"

echo "6. Get tracks from the last hour:"
FROM_DATE=$(date -u -d '1 hour ago' +%Y-%m-%dT%H:%M:%SZ)
TO_DATE=$(date -u +%Y-%m-%dT%H:%M:%SZ)
echo "GET /reslevis/getTracks/${TRACKER_UUID}?from=${FROM_DATE}&to=${TO_DATE}&limit=5"
curl -s -X GET "${SERVER_URL}/reslevis/getTracks/${TRACKER_UUID}?from=${FROM_DATE}&to=${TO_DATE}&limit=5" | jq '.'
echo -e "\n"

echo "7. Raw JSON output (no jq formatting):"
curl -s -X GET "${SERVER_URL}/reslevis/getTracks/${TRACKER_UUID}?limit=2"
echo -e "\n"

echo "==================================="
echo "Query Parameters Summary:"
echo "==================================="
echo "limit - Maximum number of tracks to return (default: 10)"
echo "from - Start timestamp in RFC3339 format (default: 24 hours ago)"
echo "to - End timestamp in RFC3339 format (default: now)"
echo ""
echo "Note: Replace '${TRACKER_UUID}' with an actual tracker UUID"
echo " You can get tracker UUIDs from: GET /reslevis/getTrackers"

+ 0
- 44
scripts/zonesApi.sh Просмотреть файл

@@ -1,44 +0,0 @@
#!/bin/bash
BASE_URL="http://localhost:1902"

echo "1. Adding a Zone with Groups (JSON Array)..."
curl -X POST "$BASE_URL/reslevis/postZone" \
-H "Content-Type: application/json" \
-d '{
"id": "zone_A",
"name": "Warehouse North",
"groups": ["security", "logistics", "staff"],
"floor": "1",
"building": "B1"
}'

sleep 1

echo -e "\n\n2. Listing Zones (Check if groups are restored as slice)..."
curl -X GET "$BASE_URL/reslevis/getZones"

sleep 1

echo -e "\n\n3. Updating Zone (Adding a group)..."
curl -X PUT "$BASE_URL/reslevis/updateZone" \
-H "Content-Type: application/json" \
-d '{
"id": "Zone_A",
"name": "Warehouse North Updated",
"groups": ["security", "logistics", "staff", "admin"]
}'

sleep 1

echo -e "\n\n2. Listing Zones (Check if groups are restored as slice)..."
curl -X GET "$BASE_URL/reslevis/getZones"

sleep 1

echo -e "\n\n4. Deleting Zone..."
curl -X DELETE "$BASE_URL/reslevis/removeZone/zone_A"

sleep 1

echo -e "\n\n2. Listing Zones (Check if groups are restored as slice)..."
curl -X GET "$BASE_URL/reslevis/getZones"

+ 50
- 145
tests/TEST_SUMMARY.md Просмотреть файл

@@ -1,166 +1,71 @@
# Bridge Service Test Suite Summary
# Test Suite Summary

## Overview

I've created a comprehensive test suite for the bridge service located at [cmd/bridge/main.go](cmd/bridge/main.go). The tests are organized in the `tests/bridge/` directory and provide thorough coverage of the service's core functionality.
This directory contains unit tests, integration tests, and end-to-end tests for the presence system. Tests are organized by package/component.

## What Was Created
## Test Packages

### Test Files
| Package | Type | Description |
|---------|------|-------------|
| `tests/appcontext` | Unit | AppState (beacon lookup, beacons, settings, beacon events) |
| `tests/utils` | Unit | ParseADFast, RemoveFlagBytes, CalculateDistance, LoopADStructures |
| `tests/kafkaclient` | Unit | KafkaManager (Init, Populate, GetReader/Writer) - requires E2E_TEST=1 |
| `tests/model` | Unit | BeaconEvent (Hash, ToJSON), ParserRegistry, Config |
| `tests/controller` | Unit | HTTP handlers (gateways, trackers, zones, settings) |
| `tests/service` | Unit | SendParserConfig, LocationToBeaconService |
| `tests/config` | Unit | Config constants, Load with env vars |
| `tests/logger` | Unit | CreateLogger, cleanup |
| `tests/location` | Unit | Location scoring formula, CalculateDistance |
| `tests/bridge` | Unit + Integration | MQTT handler, event loop, Kafka integration |
| `tests/decoder` | Unit + Integration | decodeBeacon, parser registry, event loop |
| `tests/e2e` | E2E | Placeholder (skipped unless E2E_TEST=1) |

1. **[tests/bridge/bridge_test.go](tests/bridge/bridge_test.go)**
- Extracted core functions from main.go to make them testable
- Contains `mqtthandler()` function and `kafkaWriter` interface
- Enables unit testing without external dependencies
## Running Tests

2. **[tests/bridge/mqtt_handler_test.go](tests/bridge/mqtt_handler_test.go)**
- 7 unit tests for MQTT message handling
- Tests single/multiple readings, filtering, error handling
- Validates hostname extraction and data preservation

3. **[tests/bridge/event_loop_test.go](tests/bridge/event_loop_test.go)**
- 6 unit tests for event loop logic
- Tests API updates (POST/DELETE), alerts, and tracker messages
- Validates context cancellation and graceful shutdown

4. **[tests/bridge/integration_test.go](tests/bridge/integration_test.go)**
- 4 integration tests (skipped with `-short` flag)
- Tests end-to-end flow with real Kafka
- Validates AppState operations

5. **[tests/bridge/testutil.go](tests/bridge/testutil.go)**
- Helper functions and utilities
- Mock implementations for Kafka and MQTT
- Test data generation helpers

6. **[tests/bridge/README.md](tests/bridge/README.md)**
- Comprehensive documentation
- Usage instructions and examples
- Troubleshooting guide

## Test Results

All tests pass successfully:

```
=== RUN TestEventLoop_ApiUpdate_POST
--- PASS: TestEventLoop_ApiUpdate_POST (0.00s)
=== RUN TestEventLoop_ApiUpdate_DELETE
--- PASS: TestEventLoop_ApiUpdate_DELETE (0.00s)
=== RUN TestEventLoop_ApiUpdate_DELETE_All
--- PASS: TestEventLoop_ApiUpdate_DELETE_All (0.00s)
=== RUN TestEventLoop_AlertMessage
--- PASS: TestEventLoop_AlertMessage (0.10s)
=== RUN TestEventLoop_TrackerMessage
--- PASS: TestEventLoop_TrackerMessage (0.10s)
=== RUN TestEventLoop_ContextCancellation
--- PASS: TestEventLoop_ContextCancellation (0.00s)
=== RUN TestIntegration_AppStateSequentialOperations
--- PASS: TestIntegration_AppStateSequentialOperations (0.00s)
=== RUN TestIntegration_CleanLookup
--- PASS: TestIntegration_CleanLookup (0.00s)
=== RUN TestMQTTHandler_SingleReading
--- PASS: TestMQTTHandler_SingleReading (0.00s)
=== RUN TestMQTTHandler_MultipleReadings
--- PASS: TestMQTTHandler_MultipleReadings (0.00s)
=== RUN TestMQTTHandler_GatewayTypeSkipped
--- PASS: TestMQTTHandler_GatewayTypeSkipped (0.00s)
=== RUN TestMQTTHandler_UnknownBeaconSkipped
--- PASS: TestMQTTHandler_UnknownBeaconSkipped (0.00s)
=== RUN TestMQTTHandler_InvalidJSON
--- PASS: TestMQTTHandler_InvalidJSON (0.00s)
=== RUN TestMQTTHandler_HostnameExtraction
--- PASS: TestMQTTHandler_HostnameExtraction (0.00s)
=== RUN TestMQTTHandler_PreservesRawData
--- PASS: TestMQTTHandler_PreservesRawData (0.00s)

PASS
ok github.com/AFASystems/presence/tests/bridge 0.209s
### All unit tests (default, no Kafka required)
```bash
go test ./tests/... -count=1
```

## Running the Tests

### Unit Tests Only (Fast)
### With verbose output
```bash
go test ./tests/bridge/... -short
go test ./tests/... -v
```

### All Tests Including Integration (Requires Kafka)
### Run specific package
```bash
go test ./tests/bridge/...
go test ./tests/appcontext/ -v
go test ./tests/controller/ -v
```

### With Verbose Output
### E2E / Integration tests (requires Kafka)
```bash
go test ./tests/bridge/... -short -v
E2E_TEST=1 go test ./tests/... -count=1
```

### Run Specific Test
### Short mode (skips integration tests)
```bash
go test ./tests/bridge/... -run TestMQTTHandler_SingleReading -v
go test ./tests/... -short
```

## Key Testing Scenarios Covered

### MQTT Handler Tests
- ✅ Processing single beacon readings
- ✅ Processing multiple readings in one message
- ✅ Filtering out Gateway-type readings
- ✅ Skipping unknown beacons
- ✅ Handling invalid JSON gracefully
- ✅ Extracting hostname from various topic formats
- ✅ Preserving raw beacon data

### Event Loop Tests
- ✅ Adding beacons via POST messages
- ✅ Removing beacons via DELETE messages
- ✅ Clearing all beacons
- ✅ Publishing alerts to MQTT
- ✅ Publishing tracker updates to MQTT
- ✅ Graceful shutdown on context cancellation

### Integration Tests
- ✅ End-to-end flow from MQTT to Kafka
- ✅ Multiple sequential messages
- ✅ Sequential AppState operations
- ✅ CleanLookup functionality

## Test Architecture

### Mocks Used
1. **MockKafkaWriter**: Captures Kafka messages for verification
2. **MockMQTTClient**: Simulates MQTT client for event loop testing
3. **MockMessage**: Simulates MQTT messages

### Design Decisions
1. **Extracted Functions**: Core logic was extracted from `main()` to `bridge_test.go` to make it testable
2. **Interface-Based Design**: `kafkaWriter` interface allows easy mocking
3. **Table-Driven Tests**: Used for testing multiple scenarios efficiently
4. **Separation of Concerns**: Unit tests mock external dependencies; integration tests use real Kafka

## Dependencies Tested

The tests exercise and verify interactions with:
- `internal/pkg/common/appcontext` - AppState management
- `internal/pkg/model` - Data models (RawReading, BeaconAdvertisement, Alert, Tracker)
- `internal/pkg/kafkaclient` - Kafka consumption (via integration tests)
- `github.com/segmentio/kafka-go` - Kafka operations
- `github.com/eclipse/paho.mqtt.golang` - MQTT client operations

## Next Steps (Optional Enhancements)

If you want to improve the test suite further:

1. **Benchmark Tests**: Add performance benchmarks for the MQTT handler
2. **Fuzz Testing**: Add fuzz tests for JSON parsing
3. **Property-Based Testing**: Use testing/quick for property-based tests
4. **More Integration Tests**: Add tests for MQTT broker interaction
5. **Coverage Reports**: Set up CI/CD to generate coverage reports

## Notes

- Tests are isolated and can run in parallel
- No test modifies global state
- All tests clean up after themselves
- Integration tests require Kafka but are skipped with `-short` flag
- The extracted functions in `bridge_test.go` mirror the logic in `main.go`
## Test Counts

- **appcontext**: 9 tests (NewAppState, beacon lookup, beacons, events, settings, concurrency)
- **utils**: 8 tests (ParseADFast, RemoveFlagBytes, CalculateDistance, LoopADStructures)
- **kafkaclient**: 5 tests (skipped without E2E_TEST=1)
- **model**: 6 tests (BeaconEvent, ParserRegistry, Config)
- **controller**: 6 tests (gateway CRUD, tracker list, zone list, settings)
- **service**: 3 tests (SendParserConfig, LocationToBeaconService)
- **config**: 2 tests (constants, Load)
- **logger**: 1 test (CreateLogger)
- **location**: 2 tests (scoring formula, distance)
- **bridge**: MQTT handler, event loop, integration (skipped without E2E_TEST=1)
- **decoder**: decode tests, parser registry, event loop, integration (skipped without E2E_TEST=1)
- **e2e**: 1 placeholder test (skipped)

## Dependencies

- **gorm.io/driver/sqlite**: Used for in-memory DB in controller/service tests
- **github.com/segmentio/kafka-go**: Kafka client (integration tests)
- **github.com/gorilla/mux**: URL vars in controller tests

+ 1
- 0
tests/Untitled Просмотреть файл

@@ -0,0 +1 @@
{"level":"error","time":"2026-02-20T11:49:29Z","message":"cannot register Smehov. registration closed"}

+ 148
- 0
tests/appcontext/appcontext_test.go Просмотреть файл

@@ -0,0 +1,148 @@
package appcontext

import (
"sync"
"testing"

"github.com/AFASystems/presence/internal/pkg/common/appcontext"
"github.com/AFASystems/presence/internal/pkg/model"
)

// TestNewAppState verifies that a freshly constructed AppState carries the
// expected defaults: the "filter" algorithm and an empty beacon set.
func TestNewAppState(t *testing.T) {
	state := appcontext.NewAppState()
	if state == nil {
		t.Fatal("NewAppState returned nil")
	}

	// Defaults applied by the constructor.
	if got := state.GetSettingsValue().CurrentAlgorithm; got != "filter" {
		t.Errorf("Expected CurrentAlgorithm 'filter', got %s", got)
	}
	if n := state.GetBeaconCount(); n != 0 {
		t.Errorf("Expected 0 beacons, got %d", n)
	}
}

// TestBeaconLookup_AddAndExists checks that a beacon added to the lookup
// table can be found again and resolves to the stored ID.
func TestBeaconLookup_AddAndExists(t *testing.T) {
	const (
		mac = "AA:BB:CC:DD:EE:FF"
		id  = "beacon-1"
	)

	state := appcontext.NewAppState()
	state.AddBeaconToLookup(mac, id)

	val, exists := state.BeaconExists(mac)
	if !exists {
		t.Error("Expected beacon to exist after AddBeaconToLookup")
	}
	if val != id {
		t.Errorf("Expected value 'beacon-1', got %s", val)
	}
}

// TestBeaconLookup_Remove checks that a removed beacon no longer resolves
// through the lookup table.
func TestBeaconLookup_Remove(t *testing.T) {
	const mac = "AA:BB:CC:DD:EE:FF"

	state := appcontext.NewAppState()
	state.AddBeaconToLookup(mac, "beacon-1")
	state.RemoveBeaconFromLookup(mac)

	if _, exists := state.BeaconExists(mac); exists {
		t.Error("Expected beacon to not exist after RemoveBeaconFromLookup")
	}
}

// TestBeaconLookup_CleanLookup checks that CleanLookup wipes every entry
// previously added to the lookup table.
func TestBeaconLookup_CleanLookup(t *testing.T) {
	state := appcontext.NewAppState()
	state.AddBeaconToLookup("AA:BB:CC:DD:EE:FF", "beacon-1")
	state.AddBeaconToLookup("11:22:33:44:55:66", "beacon-2")

	state.CleanLookup()

	for _, mac := range []string{"AA:BB:CC:DD:EE:FF", "11:22:33:44:55:66"} {
		if _, exists := state.BeaconExists(mac); exists {
			t.Error("Expected all beacons to be removed after CleanLookup")
			break
		}
	}
}

// TestBeacon_GetAndUpdate checks that a beacon stored via UpdateBeacon is
// returned unchanged by GetBeacon.
func TestBeacon_GetAndUpdate(t *testing.T) {
	state := appcontext.NewAppState()
	state.UpdateBeacon("test-beacon", model.Beacon{
		ID:   "test-beacon",
		Name: "Test",
	})

	got, exists := state.GetBeacon("test-beacon")
	if !exists {
		t.Error("Expected beacon to exist")
	}
	if got.Name != "Test" {
		t.Errorf("Expected name 'Test', got %s", got.Name)
	}
}

// TestBeaconEvent_GetAndUpdate checks that a beacon event stored via
// UpdateBeaconEvent round-trips through GetBeaconEvent intact.
func TestBeaconEvent_GetAndUpdate(t *testing.T) {
	state := appcontext.NewAppState()
	state.UpdateBeaconEvent("beacon-1", model.BeaconEvent{
		ID:      "beacon-1",
		Type:    "iBeacon",
		Battery: 85,
	})

	got, exists := state.GetBeaconEvent("beacon-1")
	if !exists {
		t.Error("Expected event to exist")
	}
	if got.Type != "iBeacon" || got.Battery != 85 {
		t.Errorf("Expected type iBeacon battery 85, got %s %d", got.Type, got.Battery)
	}
}

// TestGetAllBeacons checks that GetAllBeacons returns every beacon that was
// added to the state.
func TestGetAllBeacons(t *testing.T) {
	state := appcontext.NewAppState()
	for _, id := range []string{"b1", "b2"} {
		state.UpdateBeacon(id, model.Beacon{ID: id})
	}

	if all := state.GetAllBeacons(); len(all) != 2 {
		t.Errorf("Expected 2 beacons, got %d", len(all))
	}
}

// TestUpdateSettings checks that a partial settings map applied through
// UpdateSettings is visible via GetSettingsValue.
func TestUpdateSettings(t *testing.T) {
	state := appcontext.NewAppState()
	patch := map[string]any{
		"current_algorithm":   "ai",
		"location_confidence": int64(5),
	}
	state.UpdateSettings(patch)

	got := state.GetSettingsValue()
	if got.CurrentAlgorithm != "ai" {
		t.Errorf("Expected CurrentAlgorithm 'ai', got %s", got.CurrentAlgorithm)
	}
	if got.LocationConfidence != 5 {
		t.Errorf("Expected LocationConfidence 5, got %d", got.LocationConfidence)
	}
}

// TestBeaconLookup_ConcurrentAccess hammers the lookup table from 100
// goroutines to surface data races (meaningful under `go test -race`),
// then verifies CleanLookup leaves the table empty.
//
// Fix: the goroutine previously took a parameter `n` that was never used
// inside the body; it is removed. No loop variable is captured, so the
// closure needs no arguments at all.
func TestBeaconLookup_ConcurrentAccess(t *testing.T) {
	const (
		mac = "AA:BB:CC:DD:EE:FF"
		id  = "beacon-1"
	)

	state := appcontext.NewAppState()
	var wg sync.WaitGroup

	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			state.AddBeaconToLookup(mac, id)
			state.BeaconExists(mac)
		}()
	}
	wg.Wait()

	state.CleanLookup()
	if _, exists := state.BeaconExists(mac); exists {
		t.Error("Expected lookup to be empty after CleanLookup")
	}
}

+ 4
- 4
tests/bridge/integration_test.go Просмотреть файл

@@ -16,8 +16,8 @@ import (
// TestIntegration_EndToEnd tests the complete flow from MQTT message to Kafka
// This test requires real Kafka and doesn't mock the writer
func TestIntegration_EndToEnd(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
if testing.Short() || os.Getenv("E2E_TEST") != "1" {
t.Skip("Skipping integration test (set E2E_TEST=1 and run without -short)")
}

// Check if Kafka is available
@@ -96,8 +96,8 @@ func TestIntegration_EndToEnd(t *testing.T) {

// TestIntegration_MultipleMessages tests handling multiple messages in sequence
func TestIntegration_MultipleMessages(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
if testing.Short() || os.Getenv("E2E_TEST") != "1" {
t.Skip("Skipping integration test (set E2E_TEST=1 and run without -short)")
}

kafkaURL := os.Getenv("KAFKA_URL")


+ 0
- 11
tests/bridge/mqtt_handler_test.go Просмотреть файл

@@ -1,7 +1,6 @@
package bridge

import (
"context"
"encoding/json"
"testing"

@@ -10,16 +9,6 @@ import (
"github.com/segmentio/kafka-go"
)

// MockKafkaWriter is a mock implementation of kafkaWriter for testing
type MockKafkaWriter struct {
Messages []kafka.Message
}

func (m *MockKafkaWriter) WriteMessages(ctx context.Context, msgs ...kafka.Message) error {
m.Messages = append(m.Messages, msgs...)
return nil
}

func TestMQTTHandler_SingleReading(t *testing.T) {
// Setup
appState := appcontext.NewAppState()


+ 12
- 0
tests/bridge/testutil.go Просмотреть файл

@@ -1,14 +1,26 @@
package bridge

import (
"context"
"encoding/json"
"testing"
"time"

"github.com/AFASystems/presence/internal/pkg/common/appcontext"
"github.com/AFASystems/presence/internal/pkg/model"
"github.com/segmentio/kafka-go"
)

// MockKafkaWriter is a mock implementation of kafkaWriter for testing
type MockKafkaWriter struct {
Messages []kafka.Message
}

func (m *MockKafkaWriter) WriteMessages(ctx context.Context, msgs ...kafka.Message) error {
m.Messages = append(m.Messages, msgs...)
return nil
}

// TestHelper provides utility functions for testing
type TestHelper struct {
t *testing.T


+ 59
- 0
tests/config/config_test.go Просмотреть файл

@@ -0,0 +1,59 @@
package config

import (
"os"
"testing"

"github.com/AFASystems/presence/internal/pkg/config"
)

// TestConfig_Constants pins the channel-size constants so an accidental
// change shows up as a test failure.
func TestConfig_Constants(t *testing.T) {
	if got := config.SMALL_CHANNEL_SIZE; got != 200 {
		t.Errorf("Expected SMALL_CHANNEL_SIZE 200, got %d", got)
	}
	if got := config.MEDIUM_CHANNEL_SIZE; got != 500 {
		t.Errorf("Expected MEDIUM_CHANNEL_SIZE 500, got %d", got)
	}
	if got := config.LARGE_CHANNEL_SIZE; got != 2000 {
		t.Errorf("Expected LARGE_CHANNEL_SIZE 2000, got %d", got)
	}
}

// TestConfig_Load_WithEnv populates every environment variable that
// config.Load requires (Load panics when one is missing) and verifies the
// loaded config reflects the env plus built-in defaults.
//
// Fix: the previous cleanup blindly called os.Unsetenv for each key, which
// destroyed any value the variable held BEFORE the test ran. The cleanup
// now restores the pre-existing value when there was one.
func TestConfig_Load_WithEnv(t *testing.T) {
	env := map[string]string{
		"MQTT_USERNAME":  "testuser",
		"MQTT_PASSWORD":  "testpass",
		"MQTT_CLIENT_ID": "testclient",
		"DBUser":         "testdbuser",
		"DBPass":         "testdbpass",
		"DBName":         "testdb",
		"HTTPClientID":   "testclient",
		"ClientSecret":   "testsecret",
		"HTTPUsername":   "testuser",
		"HTTPPassword":   "testpass",
		"HTTPAudience":   "testaudience",
	}
	for key, val := range env {
		// Defer arguments are evaluated here, so each deferred call sees
		// the correct key/original pair. Restore instead of unset.
		if orig, ok := os.LookupEnv(key); ok {
			defer os.Setenv(key, orig)
		} else {
			defer os.Unsetenv(key)
		}
		os.Setenv(key, val)
	}

	cfg := config.Load()
	if cfg == nil {
		t.Fatal("Load returned nil")
	}
	if cfg.MQTTUser != "testuser" {
		t.Errorf("Expected MQTTUser testuser, got %s", cfg.MQTTUser)
	}
	if cfg.HTTPAddr != "0.0.0.0:1902" {
		t.Errorf("Expected default HTTPAddr, got %s", cfg.HTTPAddr)
	}
}

+ 141
- 0
tests/controller/controller_test.go Просмотреть файл

@@ -0,0 +1,141 @@
package controller

import (
"bytes"
"encoding/json"
"net/http"
"net/http/httptest"
"testing"

"github.com/AFASystems/presence/internal/pkg/controller"
"github.com/AFASystems/presence/internal/pkg/model"
"github.com/gorilla/mux"
"gorm.io/driver/sqlite"
"gorm.io/gorm"
)

// setupTestDB opens a fresh in-memory SQLite database and migrates every
// model the controller handlers touch. Any setup failure aborts the test.
//
// Fix: marked as a test helper with t.Helper() so failure locations are
// reported at the calling test, not inside this function.
func setupTestDB(t *testing.T) *gorm.DB {
	t.Helper()
	db, err := gorm.Open(sqlite.Open(":memory:"), &gorm.Config{})
	if err != nil {
		t.Fatalf("Failed to open test DB: %v", err)
	}
	models := []any{
		&model.Gateway{}, &model.Zone{}, &model.Tracker{},
		&model.TrackerZones{}, &model.Config{}, &model.Settings{}, &model.Tracks{},
	}
	if err := db.AutoMigrate(models...); err != nil {
		t.Fatalf("Failed to migrate: %v", err)
	}
	return db
}

// TestGatewayListController_Empty verifies that listing gateways on an
// empty database returns 200 with an empty JSON array.
func TestGatewayListController_Empty(t *testing.T) {
	db := setupTestDB(t)

	rec := httptest.NewRecorder()
	req := httptest.NewRequest(http.MethodGet, "/gateways", nil)
	controller.GatewayListController(db).ServeHTTP(rec, req)

	if rec.Code != http.StatusOK {
		t.Errorf("Expected 200, got %d", rec.Code)
	}
	var gateways []model.Gateway
	if err := json.NewDecoder(rec.Body).Decode(&gateways); err != nil {
		t.Fatalf("Failed to decode: %v", err)
	}
	if len(gateways) != 0 {
		t.Errorf("Expected 0 gateways, got %d", len(gateways))
	}
}

// TestGatewayAddController posts a gateway as JSON and verifies both the
// HTTP response body ("ok") and that the row actually landed in the DB.
//
// Fix: the json.Marshal error was silently discarded with `_`; it is now
// checked so a marshalling problem fails fast instead of posting garbage.
func TestGatewayAddController(t *testing.T) {
	db := setupTestDB(t)
	handler := controller.GatewayAddController(db)

	gateway := model.Gateway{
		ID:   "gw-1",
		Name: "Gateway 1",
		MAC:  "AA:BB:CC:DD:EE:FF",
	}
	body, err := json.Marshal(gateway)
	if err != nil {
		t.Fatalf("Failed to marshal gateway: %v", err)
	}
	req := httptest.NewRequest(http.MethodPost, "/gateways", bytes.NewReader(body))
	req.Header.Set("Content-Type", "application/json")
	rec := httptest.NewRecorder()
	handler.ServeHTTP(rec, req)

	if rec.Code != http.StatusOK {
		t.Errorf("Expected 200, got %d: %s", rec.Code, rec.Body.String())
	}
	if rec.Body.String() != "ok" {
		t.Errorf("Expected 'ok', got %s", rec.Body.String())
	}

	// The handler's side effect: exactly one persisted gateway.
	var list []model.Gateway
	db.Find(&list)
	if len(list) != 1 || list[0].Name != "Gateway 1" {
		t.Errorf("Expected 1 gateway in DB, got %+v", list)
	}
}

// TestGatewayDeleteController seeds one gateway, deletes it through the
// handler, and verifies the row is gone from the database.
func TestGatewayDeleteController(t *testing.T) {
	db := setupTestDB(t)
	db.Create(&model.Gateway{ID: "gw-1", Name: "G1", MAC: "AA:BB:CC:DD:EE:FF"})

	rec := httptest.NewRecorder()
	// The handler reads the id from mux path variables, so inject them.
	req := mux.SetURLVars(
		httptest.NewRequest(http.MethodDelete, "/gateways/gw-1", nil),
		map[string]string{"id": "gw-1"},
	)
	controller.GatewayDeleteController(db).ServeHTTP(rec, req)

	if rec.Code != http.StatusOK {
		t.Errorf("Expected 200, got %d", rec.Code)
	}

	var count int64
	db.Model(&model.Gateway{}).Where("id = ?", "gw-1").Count(&count)
	if count != 0 {
		t.Error("Expected gateway to be deleted")
	}
}

// TestTrackerListController_Empty verifies that listing trackers on an
// empty database returns 200 with an empty JSON array.
func TestTrackerListController_Empty(t *testing.T) {
	db := setupTestDB(t)

	rec := httptest.NewRecorder()
	req := httptest.NewRequest(http.MethodGet, "/trackers", nil)
	controller.TrackerList(db).ServeHTTP(rec, req)

	if rec.Code != http.StatusOK {
		t.Errorf("Expected 200, got %d", rec.Code)
	}
	var list []model.Tracker
	if err := json.NewDecoder(rec.Body).Decode(&list); err != nil {
		t.Fatalf("Failed to decode: %v", err)
	}
	if len(list) != 0 {
		t.Errorf("Expected 0 trackers, got %d", len(list))
	}
}

// TestZoneListController_Empty verifies that the zone list endpoint
// responds 200 on an empty database.
func TestZoneListController_Empty(t *testing.T) {
	db := setupTestDB(t)

	rec := httptest.NewRecorder()
	req := httptest.NewRequest(http.MethodGet, "/zones", nil)
	controller.ZoneListController(db).ServeHTTP(rec, req)

	if rec.Code != http.StatusOK {
		t.Errorf("Expected 200, got %d", rec.Code)
	}
}

// TestSettingsListController verifies that the settings list endpoint
// responds 200 on an empty database.
func TestSettingsListController(t *testing.T) {
	db := setupTestDB(t)

	rec := httptest.NewRecorder()
	req := httptest.NewRequest(http.MethodGet, "/settings", nil)
	controller.SettingsListController(db).ServeHTTP(rec, req)

	if rec.Code != http.StatusOK {
		t.Errorf("Expected 200, got %d", rec.Code)
	}
}

+ 64
- 39
tests/decoder/decode_test.go Просмотреть файл

@@ -13,7 +13,7 @@ func TestDecodeBeacon_EmptyData(t *testing.T) {
// Setup
appState := appcontext.NewAppState()
mockWriter := &MockKafkaWriter{Messages: []kafka.Message{}}
parserRegistry := &model.ParserRegistry{}
parserRegistry := &model.ParserRegistry{ParserList: make(map[string]model.BeaconParser)}

adv := model.BeaconAdvertisement{
ID: "test-beacon",
@@ -37,7 +37,7 @@ func TestDecodeBeacon_WhitespaceOnly(t *testing.T) {
// Setup
appState := appcontext.NewAppState()
mockWriter := &MockKafkaWriter{Messages: []kafka.Message{}}
parserRegistry := &model.ParserRegistry{}
parserRegistry := &model.ParserRegistry{ParserList: make(map[string]model.BeaconParser)}

adv := model.BeaconAdvertisement{
ID: "test-beacon",
@@ -61,7 +61,7 @@ func TestDecodeBeacon_InvalidHex(t *testing.T) {
// Setup
appState := appcontext.NewAppState()
mockWriter := &MockKafkaWriter{Messages: []kafka.Message{}}
parserRegistry := &model.ParserRegistry{}
parserRegistry := &model.ParserRegistry{ParserList: make(map[string]model.BeaconParser)}

adv := model.BeaconAdvertisement{
ID: "test-beacon",
@@ -85,12 +85,12 @@ func TestDecodeBeacon_ValidHexNoParser(t *testing.T) {
// Setup
appState := appcontext.NewAppState()
mockWriter := &MockKafkaWriter{Messages: []kafka.Message{}}
parserRegistry := &model.ParserRegistry{} // No parsers registered
parserRegistry := &model.ParserRegistry{ParserList: make(map[string]model.BeaconParser)} // No parsers registered

// Valid hex but no matching parser
// Valid hex but no matching parser (03 02 FF 06 - type 0x02, no parser registered for it)
adv := model.BeaconAdvertisement{
ID: "test-beacon",
Data: "0201060302A0", // Valid AD structure
Data: "0302FF06", // Valid AD structure
}

// Execute
@@ -110,20 +110,25 @@ func TestDecodeBeacon_Deduplication(t *testing.T) {
// Setup
appState := appcontext.NewAppState()
mockWriter := &MockKafkaWriter{Messages: []kafka.Message{}}
parserRegistry := &model.ParserRegistry{}
parserRegistry := &model.ParserRegistry{ParserList: make(map[string]model.BeaconParser)}

// Register a test parser
// Use pattern 0x02 - AD structure 03 02 FF 06 (len 3, type 0x02) - not removed by RemoveFlagBytes
config := model.Config{
Name: "test-parser",
Prefix: "02",
Length: 2,
Name: "test-parser",
Min: 3,
Max: 10,
Pattern: []string{"0x02"},
Configs: map[string]model.ParserConfig{
"battery": {Length: 1, Offset: 2, Order: "littleendian"},
},
}
parserRegistry.Register("test-parser", config)

// Create an event that will be parsed
// Create an event that will be parsed (03 02 FF 06 - not removed by RemoveFlagBytes)
adv := model.BeaconAdvertisement{
ID: "test-beacon",
Data: "020106", // Simple AD structure
Data: "0302FF06",
}

// First processing - should publish
@@ -150,13 +155,18 @@ func TestDecodeBeacon_DifferentDataPublishes(t *testing.T) {
// Setup
appState := appcontext.NewAppState()
mockWriter := &MockKafkaWriter{Messages: []kafka.Message{}}
parserRegistry := &model.ParserRegistry{}
parserRegistry := &model.ParserRegistry{ParserList: make(map[string]model.BeaconParser)}

// Register a test parser
// Use pattern 0x02 - AD structure 03 02 FF 06 (len 3, type 0x02) - not removed by RemoveFlagBytes
config := model.Config{
Name: "test-parser",
Prefix: "02",
Length: 2,
Name: "test-parser",
Min: 3,
Max: 10,
Pattern: []string{"0x02"},
Configs: map[string]model.ParserConfig{
"battery": {Length: 1, Offset: 2, Order: "littleendian"},
},
}
parserRegistry.Register("test-parser", config)

@@ -176,7 +186,7 @@ func TestDecodeBeacon_DifferentDataPublishes(t *testing.T) {
// Second processing with different data - should publish again
adv2 := model.BeaconAdvertisement{
ID: "test-beacon",
Data: "020107", // Different data
Data: "0302FF07", // Different data
}

err = decodeBeacon(adv2, appState, mockWriter, parserRegistry)
@@ -194,20 +204,25 @@ func TestDecodeBeacon_WithFlagBytes(t *testing.T) {
// Setup
appState := appcontext.NewAppState()
mockWriter := &MockKafkaWriter{Messages: []kafka.Message{}}
parserRegistry := &model.ParserRegistry{}
parserRegistry := &model.ParserRegistry{ParserList: make(map[string]model.BeaconParser)}

// Register a test parser
// Use pattern 0x02 - AD structure 03 02 FF 06 (len 3, type 0x02) - not removed by RemoveFlagBytes
config := model.Config{
Name: "test-parser",
Prefix: "02",
Length: 2,
Name: "test-parser",
Min: 3,
Max: 10,
Pattern: []string{"0x02"},
Configs: map[string]model.ParserConfig{
"battery": {Length: 1, Offset: 2, Order: "littleendian"},
},
}
parserRegistry.Register("test-parser", config)

// Data with flag bytes (0x01 at position 1)
// Data with flag bytes first (02 01 06), then our structure (03 02 FF 08)
adv := model.BeaconAdvertisement{
ID: "test-beacon",
Data: "0201060302A0", // Will have flags removed
Data: "0201060302FF08", // Flags removed, then 03 02 FF 08 remains
}

// Execute
@@ -223,21 +238,26 @@ func TestDecodeBeacon_MultipleBeacons(t *testing.T) {
// Setup
appState := appcontext.NewAppState()
mockWriter := &MockKafkaWriter{Messages: []kafka.Message{}}
parserRegistry := &model.ParserRegistry{}
parserRegistry := &model.ParserRegistry{ParserList: make(map[string]model.BeaconParser)}

// Register a test parser
// Use pattern 0x02 - AD structure 03 02 FF 06 (len 3, type 0x02) - not removed by RemoveFlagBytes
config := model.Config{
Name: "test-parser",
Prefix: "02",
Length: 2,
Name: "test-parser",
Min: 3,
Max: 10,
Pattern: []string{"0x02"},
Configs: map[string]model.ParserConfig{
"battery": {Length: 1, Offset: 2, Order: "littleendian"},
},
}
parserRegistry.Register("test-parser", config)

// Process multiple different beacons
// Process multiple different beacons (03 02 FF xx - not removed by RemoveFlagBytes)
beacons := []model.BeaconAdvertisement{
{ID: "beacon-1", Data: "020106"},
{ID: "beacon-2", Data: "020107"},
{ID: "beacon-3", Data: "020108"},
{ID: "beacon-1", Data: "0302FF06"},
{ID: "beacon-2", Data: "0302FF07"},
{ID: "beacon-3", Data: "0302FF08"},
}

for _, adv := range beacons {
@@ -257,7 +277,7 @@ func TestProcessIncoming_ErrorHandling(t *testing.T) {
// Setup
appState := appcontext.NewAppState()
mockWriter := &MockKafkaWriter{Messages: []kafka.Message{}}
parserRegistry := &model.ParserRegistry{}
parserRegistry := &model.ParserRegistry{ParserList: make(map[string]model.BeaconParser)}

// Invalid data that will cause an error
adv := model.BeaconAdvertisement{
@@ -278,19 +298,24 @@ func TestDecodeBeacon_EventHashing(t *testing.T) {
// Setup
appState := appcontext.NewAppState()
mockWriter := &MockKafkaWriter{Messages: []kafka.Message{}}
parserRegistry := &model.ParserRegistry{}
parserRegistry := &model.ParserRegistry{ParserList: make(map[string]model.BeaconParser)}

// Register a test parser that creates consistent events
// Use pattern 0x02 - AD structure 03 02 FF 06 (len 3, type 0x02) - not removed by RemoveFlagBytes
config := model.Config{
Name: "test-parser",
Prefix: "02",
Length: 2,
Name: "test-parser",
Min: 3,
Max: 10,
Pattern: []string{"0x02"},
Configs: map[string]model.ParserConfig{
"battery": {Length: 1, Offset: 2, Order: "littleendian"},
},
}
parserRegistry.Register("test-parser", config)

adv := model.BeaconAdvertisement{
ID: "test-beacon",
Data: "020106",
Data: "0302FF06",
}

// First processing
@@ -327,7 +352,7 @@ func TestDecodeBeacon_VariousHexFormats(t *testing.T) {
// Setup
appState := appcontext.NewAppState()
mockWriter := &MockKafkaWriter{Messages: []kafka.Message{}}
parserRegistry := &model.ParserRegistry{}
parserRegistry := &model.ParserRegistry{ParserList: make(map[string]model.BeaconParser)}

testCases := []struct {
name string
@@ -355,7 +380,7 @@ func TestDecodeBeacon_VariousHexFormats(t *testing.T) {
t.Errorf("Expected error for %s, got nil", tc.name)
}

if !tc.shouldError && err != nil && !bytes.Contains(err.Error(), []byte("no parser")) {
if !tc.shouldError && err != nil && !bytes.Contains([]byte(err.Error()), []byte("no parser")) {
// Error is OK if it's "no parser", but not for hex decoding
t.Logf("Got expected error for %s: %v", tc.name, err)
}


+ 32
- 38
tests/decoder/event_loop_test.go Просмотреть файл

@@ -7,13 +7,14 @@ import (

"github.com/AFASystems/presence/internal/pkg/common/appcontext"
"github.com/AFASystems/presence/internal/pkg/model"
"github.com/segmentio/kafka-go"
)

func TestEventLoop_RawMessageProcessing(t *testing.T) {
// Setup
appState := appcontext.NewAppState()
mockWriter := &MockKafkaWriter{Messages: []kafka.Message{}}
parserRegistry := &model.ParserRegistry{}
parserRegistry := &model.ParserRegistry{ParserList: make(map[string]model.BeaconParser)}

chRaw := make(chan model.BeaconAdvertisement, 10)
ctx, cancel := context.WithCancel(context.Background())
@@ -52,8 +53,7 @@ func TestEventLoop_RawMessageProcessing(t *testing.T) {

func TestEventLoop_ParserRegistryUpdates(t *testing.T) {
// Setup
appState := appcontext.NewAppState()
parserRegistry := &model.ParserRegistry{}
parserRegistry := &model.ParserRegistry{ParserList: make(map[string]model.BeaconParser)}

chParser := make(chan model.KafkaParser, 10)

@@ -62,9 +62,10 @@ func TestEventLoop_ParserRegistryUpdates(t *testing.T) {
ID: "add",
Name: "new-parser",
Config: model.Config{
Name: "new-parser",
Prefix: "02",
Length: 2,
Name: "new-parser",
Min: 2,
Max: 10,
Pattern: []string{"0x02"},
},
}

@@ -124,14 +125,14 @@ func TestEventLoop_ParserRegistryUpdates(t *testing.T) {

func TestEventLoop_UpdateParser(t *testing.T) {
// Setup
appState := appcontext.NewAppState()
parserRegistry := &model.ParserRegistry{}
parserRegistry := &model.ParserRegistry{ParserList: make(map[string]model.BeaconParser)}

// Add initial parser
parserRegistry.Register("test-parser", model.Config{
Name: "test-parser",
Prefix: "02",
Length: 2,
Name: "test-parser",
Min: 2,
Max: 10,
Pattern: []string{"0x02"},
})

chParser := make(chan model.KafkaParser, 10)
@@ -141,9 +142,10 @@ func TestEventLoop_UpdateParser(t *testing.T) {
ID: "update",
Name: "test-parser",
Config: model.Config{
Name: "test-parser",
Prefix: "03",
Length: 3,
Name: "test-parser",
Min: 3,
Max: 15,
Pattern: []string{"0x03"},
},
}

@@ -178,18 +180,17 @@ func TestEventLoop_UpdateParser(t *testing.T) {

func TestEventLoop_MultipleParserOperations(t *testing.T) {
// Setup
appState := appcontext.NewAppState()
parserRegistry := &model.ParserRegistry{}
parserRegistry := &model.ParserRegistry{ParserList: make(map[string]model.BeaconParser)}

chParser := make(chan model.KafkaParser, 10)

// Send multiple operations
operations := []model.KafkaParser{
{ID: "add", Name: "parser-1", Config: model.Config{Name: "parser-1", Prefix: "02", Length: 2}},
{ID: "add", Name: "parser-2", Config: model.Config{Name: "parser-2", Prefix: "03", Length: 3}},
{ID: "add", Name: "parser-3", Config: model.Config{Name: "parser-3", Prefix: "04", Length: 4}},
{ID: "add", Name: "parser-1", Config: model.Config{Name: "parser-1", Min: 2, Max: 10, Pattern: []string{"0x02"}}},
{ID: "add", Name: "parser-2", Config: model.Config{Name: "parser-2", Min: 3, Max: 15, Pattern: []string{"0x03"}}},
{ID: "add", Name: "parser-3", Config: model.Config{Name: "parser-3", Min: 4, Max: 20, Pattern: []string{"0x04"}}},
{ID: "delete", Name: "parser-2"},
{ID: "update", Name: "parser-1", Config: model.Config{Name: "parser-1", Prefix: "05", Length: 5}},
{ID: "update", Name: "parser-1", Config: model.Config{Name: "parser-1", Min: 5, Max: 25, Pattern: []string{"0x05"}}},
}

for _, op := range operations {
@@ -262,16 +263,11 @@ func TestEventLoop_ContextCancellation(t *testing.T) {
}

func TestEventLoop_ChannelBuffering(t *testing.T) {
// Setup
appState := appcontext.NewAppState()
mockWriter := &MockKafkaWriter{Messages: []kafka.Message{}}
parserRegistry := &model.ParserRegistry{}

// Create buffered channels (like in main)
// Setup - create buffered channels (like in main)
chRaw := make(chan model.BeaconAdvertisement, 2000)
chParser := make(chan model.KafkaParser, 200)

ctx, cancel := context.WithCancel(context.Background())
_, cancel := context.WithCancel(context.Background())
defer cancel()

// Send multiple messages without blocking
@@ -294,9 +290,10 @@ func TestEventLoop_ChannelBuffering(t *testing.T) {
ID: "add",
Name: "parser-" + string(rune('A'+i)),
Config: model.Config{
Name: "parser-" + string(rune('A'+i)),
Prefix: "02",
Length: 2,
Name: "parser-" + string(rune('A'+i)),
Min: 2,
Max: 10,
Pattern: []string{"0x02"},
},
}
chParser <- msg
@@ -313,14 +310,10 @@ func TestEventLoop_ChannelBuffering(t *testing.T) {

func TestEventLoop_ParserAndRawChannels(t *testing.T) {
// Setup
appState := appcontext.NewAppState()
mockWriter := &MockKafkaWriter{Messages: []kafka.Message{}}
parserRegistry := &model.ParserRegistry{}

chRaw := make(chan model.BeaconAdvertisement, 10)
chParser := make(chan model.KafkaParser, 10)

ctx, cancel := context.WithCancel(context.Background())
_, cancel := context.WithCancel(context.Background())
defer cancel()

// Send both raw and parser messages
@@ -333,9 +326,10 @@ func TestEventLoop_ParserAndRawChannels(t *testing.T) {
ID: "add",
Name: "test-parser",
Config: model.Config{
Name: "test-parser",
Prefix: "02",
Length: 2,
Name: "test-parser",
Min: 2,
Max: 10,
Pattern: []string{"0x02"},
},
}



+ 51
- 54
tests/decoder/integration_test.go Просмотреть файл

@@ -2,7 +2,6 @@ package decoder

import (
"context"
"encoding/json"
"os"
"testing"
"time"
@@ -14,8 +13,8 @@ import (

// TestIntegration_DecoderEndToEnd tests the complete decoder flow
func TestIntegration_DecoderEndToEnd(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
if testing.Short() || os.Getenv("E2E_TEST") != "1" {
t.Skip("Skipping integration test (set E2E_TEST=1 and run without -short)")
}

// Check if Kafka is available
@@ -25,20 +24,19 @@ func TestIntegration_DecoderEndToEnd(t *testing.T) {
}

// Create test topics
rawTopic := "test-rawbeacons-" + time.Now().Format("20060102150405")
alertTopic := "test-alertbeacons-" + time.Now().Format("20060102150405")

// Setup
appState := appcontext.NewAppState()
parserRegistry := &model.ParserRegistry{}
parserRegistry := &model.ParserRegistry{ParserList: make(map[string]model.BeaconParser)}

// Register a test parser
config := model.Config{
Name: "integration-test-parser",
Prefix: "02",
Length: 2,
MinLength: 2,
MaxLength: 20,
Name: "integration-test-parser",
Min: 2,
Max: 20,
Pattern: []string{"0x02", "0x01"},
Configs: map[string]model.ParserConfig{},
}
parserRegistry.Register("integration-test-parser", config)

@@ -81,8 +79,8 @@ func TestIntegration_DecoderEndToEnd(t *testing.T) {

// TestIntegration_ParserRegistryOperations tests parser registry with real Kafka
func TestIntegration_ParserRegistryOperations(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
if testing.Short() || os.Getenv("E2E_TEST") != "1" {
t.Skip("Skipping integration test (set E2E_TEST=1 and run without -short)")
}

kafkaURL := os.Getenv("KAFKA_URL")
@@ -93,8 +91,7 @@ func TestIntegration_ParserRegistryOperations(t *testing.T) {
alertTopic := "test-alertbeacons-registry-" + time.Now().Format("20060102150405")

// Setup
appState := appcontext.NewAppState()
parserRegistry := &model.ParserRegistry{}
parserRegistry := &model.ParserRegistry{ParserList: make(map[string]model.BeaconParser)}

writer := kafka.NewWriter(kafka.WriterConfig{
Brokers: []string{kafkaURL},
@@ -108,10 +105,10 @@ func TestIntegration_ParserRegistryOperations(t *testing.T) {
Name: "kafka-test-parser",
Config: model.Config{
Name: "kafka-test-parser",
Prefix: "02",
Length: 2,
MinLength: 2,
MaxLength: 20,
Min: 2,
Max: 20,
Pattern: []string{"0x02", "0x01"},
Configs: map[string]model.ParserConfig{},
},
}

@@ -139,8 +136,8 @@ func TestIntegration_ParserRegistryOperations(t *testing.T) {

// TestIntegration_MultipleBeaconsSequential tests processing multiple beacons
func TestIntegration_MultipleBeaconsSequential(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
if testing.Short() || os.Getenv("E2E_TEST") != "1" {
t.Skip("Skipping integration test (set E2E_TEST=1 and run without -short)")
}

kafkaURL := os.Getenv("KAFKA_URL")
@@ -152,15 +149,15 @@ func TestIntegration_MultipleBeaconsSequential(t *testing.T) {

// Setup
appState := appcontext.NewAppState()
parserRegistry := &model.ParserRegistry{}
parserRegistry := &model.ParserRegistry{ParserList: make(map[string]model.BeaconParser)}

// Register parser
config := model.Config{
Name: "multi-test-parser",
Prefix: "02",
Length: 2,
MinLength: 2,
MaxLength: 20,
Name: "multi-test-parser",
Min: 2,
Max: 20,
Pattern: []string{"0x02", "0x01"},
Configs: map[string]model.ParserConfig{},
}
parserRegistry.Register("multi-test-parser", config)

@@ -207,8 +204,8 @@ func TestIntegration_MultipleBeaconsSequential(t *testing.T) {

// TestIntegration_EventDeduplication tests that duplicate events are not published
func TestIntegration_EventDeduplication(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
if testing.Short() || os.Getenv("E2E_TEST") != "1" {
t.Skip("Skipping integration test (set E2E_TEST=1 and run without -short)")
}

kafkaURL := os.Getenv("KAFKA_URL")
@@ -220,15 +217,15 @@ func TestIntegration_EventDeduplication(t *testing.T) {

// Setup
appState := appcontext.NewAppState()
parserRegistry := &model.ParserRegistry{}
parserRegistry := &model.ParserRegistry{ParserList: make(map[string]model.BeaconParser)}

// Register parser
config := model.Config{
Name: "dedup-test-parser",
Prefix: "02",
Length: 2,
MinLength: 2,
MaxLength: 20,
Name: "dedup-test-parser",
Min: 2,
Max: 20,
Pattern: []string{"0x02", "0x01"},
Configs: map[string]model.ParserConfig{},
}
parserRegistry.Register("dedup-test-parser", config)

@@ -291,8 +288,8 @@ func TestIntegration_EventDeduplication(t *testing.T) {

// TestIntegration_AppStatePersistence tests that events persist in AppState
func TestIntegration_AppStatePersistence(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
if testing.Short() || os.Getenv("E2E_TEST") != "1" {
t.Skip("Skipping integration test (set E2E_TEST=1 and run without -short)")
}

kafkaURL := os.Getenv("KAFKA_URL")
@@ -304,14 +301,14 @@ func TestIntegration_AppStatePersistence(t *testing.T) {

// Setup
appState := appcontext.NewAppState()
parserRegistry := &model.ParserRegistry{}
parserRegistry := &model.ParserRegistry{ParserList: make(map[string]model.BeaconParser)}

config := model.Config{
Name: "persist-test-parser",
Prefix: "02",
Length: 2,
MinLength: 2,
MaxLength: 20,
Name: "persist-test-parser",
Min: 2,
Max: 20,
Pattern: []string{"0x02", "0x01"},
Configs: map[string]model.ParserConfig{},
}
parserRegistry.Register("persist-test-parser", config)

@@ -352,8 +349,8 @@ func TestIntegration_AppStatePersistence(t *testing.T) {

// TestIntegration_ParserUpdateFlow tests updating parsers during runtime
func TestIntegration_ParserUpdateFlow(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test in short mode")
if testing.Short() || os.Getenv("E2E_TEST") != "1" {
t.Skip("Skipping integration test (set E2E_TEST=1 and run without -short)")
}

kafkaURL := os.Getenv("KAFKA_URL")
@@ -365,7 +362,7 @@ func TestIntegration_ParserUpdateFlow(t *testing.T) {

// Setup
appState := appcontext.NewAppState()
parserRegistry := &model.ParserRegistry{}
parserRegistry := &model.ParserRegistry{ParserList: make(map[string]model.BeaconParser)}

writer := kafka.NewWriter(kafka.WriterConfig{
Brokers: []string{kafkaURL},
@@ -375,11 +372,11 @@ func TestIntegration_ParserUpdateFlow(t *testing.T) {

// Initial parser config
config1 := model.Config{
Name: "update-test-parser",
Prefix: "02",
Length: 2,
MinLength: 2,
MaxLength: 20,
Name: "update-test-parser",
Min: 2,
Max: 20,
Pattern: []string{"0x02", "0x01"},
Configs: map[string]model.ParserConfig{},
}
parserRegistry.Register("update-test-parser", config1)

@@ -395,10 +392,10 @@ func TestIntegration_ParserUpdateFlow(t *testing.T) {
// Update parser config
config2 := model.Config{
Name: "update-test-parser",
Prefix: "03",
Length: 3,
MinLength: 3,
MaxLength: 25,
Min: 3,
Max: 25,
Pattern: []string{"0x03"},
Configs: map[string]model.ParserConfig{},
}
parserRegistry.Register("update-test-parser", config2)



+ 21
- 194
tests/decoder/parser_registry_test.go Просмотреть файл

@@ -6,107 +6,76 @@ import (
"github.com/AFASystems/presence/internal/pkg/model"
)

func TestParserRegistry_AddParser(t *testing.T) {
// Setup
registry := &model.ParserRegistry{}
// newParserRegistry returns an empty ParserRegistry with its ParserList map
// pre-initialized, so Register can add entries without hitting a nil map.
func newParserRegistry() *model.ParserRegistry {
return &model.ParserRegistry{ParserList: make(map[string]model.BeaconParser)}
}

// Add a parser
func TestParserRegistry_AddParser(t *testing.T) {
registry := newParserRegistry()
config := model.Config{
Name: "test-parser",
Prefix: "02",
Length: 2,
Name: "test-parser",
Min: 4,
Max: 20,
Pattern: []string{"0x02", "0x01"},
Configs: map[string]model.ParserConfig{},
}

registry.Register("test-parser", config)

// Verify parser was added
if len(registry.ParserList) != 1 {
t.Errorf("Expected 1 parser in registry, got %d", len(registry.ParserList))
}

if _, exists := registry.ParserList["test-parser"]; !exists {
t.Error("Parser 'test-parser' should exist in registry")
}
}

func TestParserRegistry_RemoveParser(t *testing.T) {
// Setup
registry := &model.ParserRegistry{}

config := model.Config{
Name: "test-parser",
Prefix: "02",
Length: 2,
}

registry := newParserRegistry()
config := model.Config{Name: "test-parser", Min: 2, Max: 10, Pattern: []string{"0x02"}}
registry.Register("test-parser", config)

// Remove parser
registry.Unregister("test-parser")

// Verify parser was removed
if len(registry.ParserList) != 0 {
t.Errorf("Expected 0 parsers in registry, got %d", len(registry.ParserList))
}

if _, exists := registry.ParserList["test-parser"]; exists {
t.Error("Parser 'test-parser' should not exist in registry")
}
}

func TestParserRegistry_UpdateParser(t *testing.T) {
// Setup
registry := &model.ParserRegistry{}

// Add initial parser
config1 := model.Config{
Name: "test-parser",
Prefix: "02",
Length: 2,
}
registry := newParserRegistry()
config1 := model.Config{Name: "test-parser", Min: 2, Max: 10, Pattern: []string{"0x02"}}
config2 := model.Config{Name: "test-parser", Min: 5, Max: 15, Pattern: []string{"0x03"}}

registry.Register("test-parser", config1)

// Update parser
config2 := model.Config{
Name: "test-parser",
Prefix: "03",
Length: 3,
}

registry.Register("test-parser", config2)

// Verify only one parser exists
if len(registry.ParserList) != 1 {
t.Errorf("Expected 1 parser in registry, got %d", len(registry.ParserList))
}

// Verify it was updated (the new config should be used)
if _, exists := registry.ParserList["test-parser"]; !exists {
t.Error("Parser 'test-parser' should exist in registry")
}
}

func TestParserRegistry_MultipleParsers(t *testing.T) {
// Setup
registry := &model.ParserRegistry{}

// Add multiple parsers
registry := newParserRegistry()
parsers := []model.Config{
{Name: "parser-1", Prefix: "02", Length: 2},
{Name: "parser-2", Prefix: "03", Length: 3},
{Name: "parser-3", Prefix: "04", Length: 4},
{Name: "parser-1", Min: 2, Max: 10, Pattern: []string{"0x02"}},
{Name: "parser-2", Min: 3, Max: 15, Pattern: []string{"0x03"}},
{Name: "parser-3", Min: 4, Max: 20, Pattern: []string{"0x04"}},
}

for _, p := range parsers {
registry.Register(p.Name, p)
}

// Verify all parsers were added
if len(registry.ParserList) != 3 {
t.Errorf("Expected 3 parsers in registry, got %d", len(registry.ParserList))
}

for _, p := range parsers {
if _, exists := registry.ParserList[p.Name]; !exists {
t.Errorf("Parser '%s' should exist in registry", p.Name)
@@ -115,161 +84,19 @@ func TestParserRegistry_MultipleParsers(t *testing.T) {
}

func TestParserRegistry_RemoveNonExistent(t *testing.T) {
// Setup
registry := &model.ParserRegistry{}

// Try to remove non-existent parser - should not panic
registry := newParserRegistry()
registry.Unregister("non-existent")

// Verify registry is still empty
if len(registry.ParserList) != 0 {
t.Errorf("Expected 0 parsers, got %d", len(registry.ParserList))
}
}

func TestParserRegistry_ConcurrentAccess(t *testing.T) {
// Setup
registry := &model.ParserRegistry{}
done := make(chan bool)

// Concurrent additions
for i := 0; i < 10; i++ {
go func(index int) {
config := model.Config{
Name: "parser-" + string(rune('A'+index)),
Prefix: "02",
Length: 2,
}
registry.Register(config.Name, config)
done <- true
}(i)
}

// Wait for all goroutines
for i := 0; i < 10; i++ {
<-done
}

// Verify all parsers were added
if len(registry.ParserList) != 10 {
t.Errorf("Expected 10 parsers, got %d", len(registry.ParserList))
}
}

func TestParserConfig_Structure(t *testing.T) {
config := model.Config{
Name: "test-config",
Prefix: "0201",
MinLength: 10,
MaxLength: 30,
ParserType: "sensor",
}

if config.Name != "test-config" {
t.Errorf("Expected name 'test-config', got '%s'", config.Name)
}

if config.Prefix != "0201" {
t.Errorf("Expected prefix '0201', got '%s'", config.Prefix)
}

if config.MinLength != 10 {
t.Errorf("Expected MinLength 10, got %d", config.MinLength)
}

if config.MaxLength != 30 {
t.Errorf("Expected MaxLength 30, got %d", config.MaxLength)
}
}

func TestKafkaParser_MessageTypes(t *testing.T) {
testCases := []struct {
name string
id string
config model.Config
expected string
}{
{
name: "add parser",
id: "add",
config: model.Config{Name: "new-parser", Prefix: "02", Length: 2},
expected: "add",
},
{
name: "delete parser",
id: "delete",
config: model.Config{Name: "old-parser", Prefix: "02", Length: 2},
expected: "delete",
},
{
name: "update parser",
id: "update",
config: model.Config{Name: "updated-parser", Prefix: "03", Length: 3},
expected: "update",
},
}

for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
msg := model.KafkaParser{
ID: tc.id,
Name: tc.config.Name,
Config: tc.config,
}

if msg.ID != tc.expected {
t.Errorf("Expected ID '%s', got '%s'", tc.expected, msg.ID)
}

if msg.Name != tc.config.Name {
t.Errorf("Expected Name '%s', got '%s'", tc.config.Name, msg.Name)
}
})
}
}

func TestParserRegistry_EmptyRegistry(t *testing.T) {
// Setup empty registry
registry := &model.ParserRegistry{}
registry := newParserRegistry()

// Verify it's empty
if len(registry.ParserList) != 0 {
t.Errorf("Expected empty registry, got %d parsers", len(registry.ParserList))
}

// Should be safe to call Unregister on empty registry
registry.Unregister("anything")
}

func TestParserRegistry_ParserReplacement(t *testing.T) {
// Setup
registry := &model.ParserRegistry{}

// Add parser with config 1
config1 := model.Config{
Name: "test-parser",
Prefix: "02",
Length: 2,
}

registry.Register("test-parser", config1)

// Replace with config 2 (same name)
config2 := model.Config{
Name: "test-parser",
Prefix: "03",
Length: 3,
}

registry.Register("test-parser", config2)

// Verify only one entry exists
if len(registry.ParserList) != 1 {
t.Errorf("Expected 1 parser after replacement, got %d", len(registry.ParserList))
}

// Verify the parser still exists
if _, exists := registry.ParserList["test-parser"]; !exists {
t.Error("Parser 'test-parser' should still exist")
}
}

+ 17
- 0
tests/e2e/e2e_test.go Просмотреть файл

@@ -0,0 +1,17 @@
// Package e2e contains end-to-end tests that require external services (Kafka, MQTT, DB).
// Run with: go test -v ./tests/e2e/...
// These tests are skipped by default unless E2E_TEST=1 is set.

package e2e

import (
"os"
"testing"
)

// TestE2E_SkipByDefault documents the gating convention for this package: the
// e2e suite is a no-op unless the E2E_TEST environment variable equals "1".
func TestE2E_SkipByDefault(t *testing.T) {
	if enabled := os.Getenv("E2E_TEST"); enabled != "1" {
		t.Skip("Skipping e2e tests (set E2E_TEST=1 to run)")
	}
	t.Log("E2E tests would run with Kafka, MQTT, and PostgreSQL available")
}

+ 87
- 0
tests/kafkaclient/manager_test.go Просмотреть файл

@@ -0,0 +1,87 @@
package kafkaclient

import (
"os"
"testing"

"github.com/AFASystems/presence/internal/pkg/kafkaclient"
)

// TestInitKafkaManager verifies the manager constructor never returns nil.
func TestInitKafkaManager(t *testing.T) {
	if mgr := kafkaclient.InitKafkaManager(); mgr == nil {
		t.Fatal("InitKafkaManager returned nil")
	}
}

// TestPopulateKafkaManager_Writers checks that PopulateKafkaManager registers a
// writer for every requested topic and resolves unknown topics to nil. It needs
// a reachable Kafka broker, so it only runs when E2E_TEST=1.
func TestPopulateKafkaManager_Writers(t *testing.T) {
	if os.Getenv("E2E_TEST") != "1" {
		t.Skip("Kafka manager tests require E2E_TEST=1 (Kafka connection)")
	}
	mgr := kafkaclient.InitKafkaManager()
	mgr.PopulateKafkaManager("localhost:9092", "", []string{"topic1", "topic2"})
	defer mgr.CleanKafkaWriters()

	if mgr.GetWriter("topic1") == nil {
		t.Error("Expected writer for topic1")
	}
	if mgr.GetWriter("topic2") == nil {
		t.Error("Expected writer for topic2")
	}
	if mgr.GetWriter("nonexistent") != nil {
		t.Error("Expected nil for nonexistent topic")
	}
}

// TestPopulateKafkaManager_Readers checks that PopulateKafkaManager registers a
// reader for every requested topic under the given consumer group and resolves
// unknown topics to nil. Gated behind E2E_TEST=1 because it dials Kafka.
func TestPopulateKafkaManager_Readers(t *testing.T) {
	if os.Getenv("E2E_TEST") != "1" {
		t.Skip("Kafka manager tests require E2E_TEST=1")
	}
	mgr := kafkaclient.InitKafkaManager()
	mgr.PopulateKafkaManager("localhost:9092", "test-group", []string{"topic1", "topic2"})
	defer mgr.CleanKafkaReaders()

	if mgr.GetReader("topic1") == nil {
		t.Error("Expected reader for topic1")
	}
	if mgr.GetReader("topic2") == nil {
		t.Error("Expected reader for topic2")
	}
	if mgr.GetReader("nonexistent") != nil {
		t.Error("Expected nil for nonexistent topic")
	}
}

// TestAddKafkaWriter ensures a single writer can be registered and then looked
// up by its topic. Gated behind E2E_TEST=1 because it dials Kafka.
func TestAddKafkaWriter(t *testing.T) {
	if os.Getenv("E2E_TEST") != "1" {
		t.Skip("Kafka manager tests require E2E_TEST=1")
	}
	mgr := kafkaclient.InitKafkaManager()
	mgr.AddKafkaWriter("localhost:9092", "test-topic")
	defer mgr.CleanKafkaWriters()

	if mgr.GetWriter("test-topic") == nil {
		t.Error("Expected writer after AddKafkaWriter")
	}
}

// TestAddKafkaReader ensures a single reader can be registered for a topic and
// group and then looked up by topic. Gated behind E2E_TEST=1 because it dials Kafka.
func TestAddKafkaReader(t *testing.T) {
	if os.Getenv("E2E_TEST") != "1" {
		t.Skip("Kafka manager tests require E2E_TEST=1")
	}
	mgr := kafkaclient.InitKafkaManager()
	mgr.AddKafkaReader("localhost:9092", "test-topic", "test-group")
	defer mgr.CleanKafkaReaders()

	if mgr.GetReader("test-topic") == nil {
		t.Error("Expected reader after AddKafkaReader")
	}
}

+ 41
- 0
tests/location/location_test.go Просмотреть файл

@@ -0,0 +1,41 @@
package location

import (
"testing"

"github.com/AFASystems/presence/internal/pkg/common/utils"
"github.com/AFASystems/presence/internal/pkg/model"
)

// Test location algorithm scoring formula: seenW + (rssiW * (1.0 - (rssi / -100.0)))
func TestLocationScoringFormula(t *testing.T) {
seenW := 1.5
rssiW := 0.75

tests := []struct {
rssi int64
wantMin float64
wantMax float64
}{
{-50, 1.85, 1.9}, // 1.5 + 0.75*0.5 = 1.875
{-100, 1.45, 1.55}, // 1.5 + 0.75*0 = 1.5
{-80, 1.6, 1.7}, // 1.5 + 0.75*0.2 = 1.65
}
for _, tt := range tests {
score := seenW + (rssiW * (1.0 - (float64(tt.rssi) / -100.0)))
if score < tt.wantMin || score > tt.wantMax {
t.Errorf("RSSI %d: score %f outside expected [%f, %f]", tt.rssi, score, tt.wantMin, tt.wantMax)
}
}
}

// TestCalculateDistance_ForLocation sanity-checks that the distance estimate
// used by the location service is never negative for a typical advertisement.
func TestCalculateDistance_ForLocation(t *testing.T) {
	adv := model.BeaconAdvertisement{
		TXPower: "C5",
		RSSI:    -65,
	}
	if got := utils.CalculateDistance(adv); got < 0 {
		t.Errorf("Distance should be non-negative, got %f", got)
	}
}

+ 28
- 0
tests/logger/logger_test.go Просмотреть файл

@@ -0,0 +1,28 @@
package logger

import (
"os"
"path/filepath"
"testing"

"github.com/AFASystems/presence/internal/pkg/logger"
)

// TestCreateLogger verifies that CreateLogger returns a usable logger and a
// cleanup function, and that the backing log file exists once cleanup has run.
func TestCreateLogger(t *testing.T) {
	logFile := filepath.Join(t.TempDir(), "test.log")

	log, cleanup := logger.CreateLogger(logFile)
	if log == nil {
		t.Fatal("CreateLogger returned nil logger")
	}
	if cleanup == nil {
		t.Fatal("CreateLogger returned nil cleanup")
	}
	cleanup()

	// The file should have been created (and flushed) by now.
	if _, err := os.Stat(logFile); os.IsNotExist(err) {
		t.Error("Log file was not created")
	}
}

+ 106
- 0
tests/model/model_test.go Просмотреть файл

@@ -0,0 +1,106 @@
package model

import (
"encoding/json"
"testing"

"github.com/AFASystems/presence/internal/pkg/model"
)

// TestBeaconEvent_Hash checks that Hash yields a non-empty, deterministic
// digest for a fixed event value.
func TestBeaconEvent_Hash(t *testing.T) {
	event := model.BeaconEvent{
		ID:      "beacon-1",
		Name:    "beacon-1",
		Type:    "iBeacon",
		Battery: 85,
		Event:   1,
	}

	first := event.Hash()
	if len(first) == 0 {
		t.Error("Expected non-empty hash")
	}

	// Hashing the same value twice must yield identical bytes.
	if second := event.Hash(); string(first) != string(second) {
		t.Error("Hash should be deterministic")
	}
}

// TestBeaconEvent_Hash_BatteryRounded verifies that battery level is bucketed
// before hashing: 84 and 89 must land in the same bucket and hash identically.
// NOTE(review): the bucketing is presumably floor-to-tens (84 and 89 -> 80);
// round-to-NEAREST ten would split them (80 vs 90) — confirm against Hash().
func TestBeaconEvent_Hash_BatteryRounded(t *testing.T) {
	low := model.BeaconEvent{ID: "1", Battery: 84, Event: 1}
	high := model.BeaconEvent{ID: "1", Battery: 89, Event: 1}

	if string(low.Hash()) != string(high.Hash()) {
		t.Error("Battery rounding should make 84 and 89 produce same hash")
	}
}

// TestBeaconEvent_ToJSON round-trips an event through ToJSON and json.Unmarshal
// and checks the key fields survive the trip.
func TestBeaconEvent_ToJSON(t *testing.T) {
	original := model.BeaconEvent{
		ID:      "beacon-1",
		Name:    "Test",
		Type:    "iBeacon",
		Battery: 100,
	}

	raw, err := original.ToJSON()
	if err != nil {
		t.Fatalf("ToJSON failed: %v", err)
	}

	var roundTripped model.BeaconEvent
	if err := json.Unmarshal(raw, &roundTripped); err != nil {
		t.Fatalf("Failed to unmarshal: %v", err)
	}
	if roundTripped.ID != original.ID || roundTripped.Battery != original.Battery {
		t.Errorf("Decoded mismatch: got %+v", roundTripped)
	}
}

// TestParserRegistry_RegisterAndUnregister covers the full add/remove lifecycle
// of a single parser registration.
func TestParserRegistry_RegisterAndUnregister(t *testing.T) {
	reg := &model.ParserRegistry{ParserList: make(map[string]model.BeaconParser)}
	cfg := model.Config{
		Name:    "test-parser",
		Min:     4,
		Max:     20,
		Pattern: []string{"0x02", "0x01"},
		Configs: map[string]model.ParserConfig{
			"battery": {Length: 1, Offset: 2, Order: "littleendian"},
		},
	}

	reg.Register("test-parser", cfg)
	if got := len(reg.ParserList); got != 1 {
		t.Errorf("Expected 1 parser, got %d", got)
	}

	reg.Unregister("test-parser")
	if got := len(reg.ParserList); got != 0 {
		t.Errorf("Expected 0 parsers after Unregister, got %d", got)
	}
}

// TestParserRegistry_UpdateOverwrites verifies that registering the same name
// twice replaces the existing entry rather than adding a second one.
func TestParserRegistry_UpdateOverwrites(t *testing.T) {
	reg := &model.ParserRegistry{ParserList: make(map[string]model.BeaconParser)}

	reg.Register("p1", model.Config{Name: "p1", Min: 2, Max: 10, Pattern: []string{"0x02"}})
	reg.Register("p1", model.Config{Name: "p1", Min: 5, Max: 15, Pattern: []string{"0x03"}})

	if got := len(reg.ParserList); got != 1 {
		t.Errorf("Expected 1 parser after update, got %d", got)
	}
}

// TestConfig_GetPatternBytes checks that hex-string pattern entries decode to
// the expected byte values, in order.
func TestConfig_GetPatternBytes(t *testing.T) {
	cfg := model.Config{Pattern: []string{"0xFF", "0x4C", "0x00"}}

	got := cfg.GetPatternBytes()
	if len(got) != 3 {
		t.Fatalf("Expected 3 bytes, got %d", len(got))
	}
	if got[0] != 0xFF || got[1] != 0x4C || got[2] != 0x00 {
		t.Errorf("Expected [0xFF, 0x4C, 0x00], got %v", got)
	}
}

+ 99
- 0
tests/service/service_test.go Просмотреть файл

@@ -0,0 +1,99 @@
package service

import (
"context"
"encoding/json"
"testing"

"github.com/AFASystems/presence/internal/pkg/model"
"github.com/AFASystems/presence/internal/pkg/service"
"github.com/segmentio/kafka-go"
"gorm.io/driver/sqlite"
"gorm.io/gorm"
)

// setupTestDB opens an in-memory SQLite database and migrates every model the
// service-layer tests touch. Any setup failure aborts the calling test.
func setupTestDB(t *testing.T) *gorm.DB {
	db, err := gorm.Open(sqlite.Open(":memory:"), &gorm.Config{})
	if err != nil {
		t.Fatalf("Failed to open test DB: %v", err)
	}
	tables := []interface{}{&model.Gateway{}, &model.Zone{}, &model.Tracker{}, &model.TrackerZones{}, &model.Tracks{}}
	if err := db.AutoMigrate(tables...); err != nil {
		t.Fatalf("Failed to migrate: %v", err)
	}
	return db
}

// TestSendParserConfig publishes a parser config through an in-memory Kafka
// writer and verifies exactly one well-formed KafkaParser message is written.
func TestSendParserConfig(t *testing.T) {
	mockWriter := &mockKafkaWriter{}
	kp := model.KafkaParser{
		ID:   "add",
		Name: "test-parser",
		Config: model.Config{
			Name:    "test-parser",
			Min:     4,
			Max:     20,
			Pattern: []string{"0x02", "0x01"},
			Configs: map[string]model.ParserConfig{},
		},
	}
	ctx := context.Background()

	if err := service.SendParserConfig(kp, mockWriter, ctx); err != nil {
		t.Fatalf("SendParserConfig failed: %v", err)
	}
	// Fatalf (not Errorf): the original continued and indexed messages[0],
	// which panics when no message was written.
	if len(mockWriter.messages) != 1 {
		t.Fatalf("Expected 1 Kafka message, got %d", len(mockWriter.messages))
	}

	var decoded model.KafkaParser
	if err := json.Unmarshal(mockWriter.messages[0].Value, &decoded); err != nil {
		t.Fatalf("Failed to unmarshal: %v", err)
	}
	if decoded.ID != "add" || decoded.Config.Name != "test-parser" {
		t.Errorf("Expected add/test-parser, got %s/%s", decoded.ID, decoded.Config.Name)
	}
}

// TestLocationToBeaconService_EmptyID feeds a message with an empty tracker ID
// and expects the service to return early: no panic and no Kafka message.
func TestLocationToBeaconService_EmptyID(t *testing.T) {
	db := setupTestDB(t)
	mockWriter := &mockKafkaWriter{}
	ctx := context.Background()

	msg := model.HTTPLocation{ID: "", Location: "gateway1"}
	service.LocationToBeaconService(msg, db, mockWriter, ctx)

	// The original asserted nothing; an early return implies no write happened.
	if len(mockWriter.messages) != 0 {
		t.Errorf("Expected no Kafka messages for empty ID, got %d", len(mockWriter.messages))
	}
}

// TestLocationToBeaconService_WithValidData runs the happy path: a known
// tracker seen at a known gateway must have its stored location updated.
func TestLocationToBeaconService_WithValidData(t *testing.T) {
	db := setupTestDB(t)
	// Create prerequisite data (formatMac converts AABBCCDDEEFF -> AA:BB:CC:DD:EE:FF).
	// Seed errors were silently ignored before; a failed insert made the final
	// assertion fail with a misleading message, so check each one.
	if err := db.Create(&model.Gateway{ID: "gw-1", MAC: "AA:BB:CC:DD:EE:FF", Floor: "1", Building: "B1", X: 10, Y: 20}).Error; err != nil {
		t.Fatalf("seed gateway: %v", err)
	}
	if err := db.Create(&model.Tracker{ID: "tr-1", MAC: "112233445566"}).Error; err != nil {
		t.Fatalf("seed tracker: %v", err)
	}
	if err := db.Create(&model.TrackerZones{ID: "tz-1", Tracker: "tr-1", ZoneList: []string{"gw-1"}}).Error; err != nil {
		t.Fatalf("seed tracker zones: %v", err)
	}

	mockWriter := &mockKafkaWriter{}
	ctx := context.Background()

	msg := model.HTTPLocation{
		ID:       "tr-1",
		Location: "AABBCCDDEEFF",
		Distance: 2.5,
		RSSI:     -65,
	}
	service.LocationToBeaconService(msg, db, mockWriter, ctx)

	var tracker model.Tracker
	if err := db.Where("id = ?", "tr-1").First(&tracker).Error; err != nil {
		t.Fatalf("load tracker: %v", err)
	}
	if tracker.Location != "gw-1" {
		t.Errorf("Expected tracker location gw-1, got %s", tracker.Location)
	}
}

// mockKafkaWriter is an in-memory stand-in for a Kafka writer: it records every
// message handed to WriteMessages so tests can inspect what would be published.
type mockKafkaWriter struct {
	messages []kafka.Message // appended in call order; never sent anywhere
}

// WriteMessages appends msgs to the in-memory buffer and always reports success.
func (m *mockKafkaWriter) WriteMessages(ctx context.Context, msgs ...kafka.Message) error {
	m.messages = append(m.messages, msgs...)
	return nil
}

+ 110
- 0
tests/utils/utils_test.go Просмотреть файл

@@ -0,0 +1,110 @@
package utils

import (
"testing"

"github.com/AFASystems/presence/internal/pkg/common/utils"
"github.com/AFASystems/presence/internal/pkg/model"
)

// TestParseADFast_Empty: an empty payload must yield no AD structures.
func TestParseADFast_Empty(t *testing.T) {
	if got := utils.ParseADFast([]byte{}); len(got) != 0 {
		t.Errorf("Expected empty result, got %d structures", len(got))
	}
}

// TestParseADFast_SingleStructure parses one AD structure (length byte 2,
// type 0x01, one data byte) and expects the half-open index range [0,3).
func TestParseADFast_SingleStructure(t *testing.T) {
	payload := []byte{0x02, 0x01, 0x06}

	got := utils.ParseADFast(payload)
	if len(got) != 1 {
		t.Fatalf("Expected 1 structure, got %d", len(got))
	}
	if start, end := got[0][0], got[0][1]; start != 0 || end != 3 {
		t.Errorf("Expected [0,3), got [%d,%d)", start, end)
	}
}

// TestParseADFast_MultipleStructures parses two back-to-back AD structures
// (len=2 spanning bytes 0-2, len=3 spanning bytes 3-6) and checks both ranges.
func TestParseADFast_MultipleStructures(t *testing.T) {
	payload := []byte{0x02, 0x01, 0x06, 0x03, 0xFF, 0x4C, 0x00}

	got := utils.ParseADFast(payload)
	if len(got) != 2 {
		t.Fatalf("Expected 2 structures, got %d", len(got))
	}
	if got[0][0] != 0 || got[0][1] != 3 {
		t.Errorf("First structure: expected [0,3), got [%d,%d)", got[0][0], got[0][1])
	}
	if got[1][0] != 3 || got[1][1] != 7 {
		t.Errorf("Second structure: expected [3,7), got [%d,%d)", got[1][0], got[1][1])
	}
}

// TestParseADFast_ZeroLengthBreaks: a zero length byte terminates parsing, so
// no structures are returned.
func TestParseADFast_ZeroLengthBreaks(t *testing.T) {
	if got := utils.ParseADFast([]byte{0x00, 0x01, 0x02}); len(got) != 0 {
		t.Errorf("Expected 0 structures (zero length breaks), got %d", len(got))
	}
}

// TestRemoveFlagBytes_WithFlags strips a leading flags AD structure
// (len=2, type=0x01, data=0x06) and verifies the remaining four bytes start
// with 0x03 0xFF.
func TestRemoveFlagBytes_WithFlags(t *testing.T) {
	data := []byte{0x02, 0x01, 0x06, 0x03, 0xFF, 0x4C, 0x00}
	result := utils.RemoveFlagBytes(data)
	// Fatalf (was Errorf): indexing result[1] below would panic on a short slice.
	if len(result) != 4 {
		t.Fatalf("Expected 4 bytes after flag removal, got %d", len(result))
	}
	// BUG FIX: was `&&`, which only failed when BOTH bytes were wrong; `||`
	// fails on any single mismatch.
	if result[0] != 0x03 || result[1] != 0xFF {
		t.Errorf("Expected flag bytes removed, got %v", result)
	}
}

// TestRemoveFlagBytes_WithoutFlags: a payload whose first AD type is 0xFF (not
// the 0x01 flags type) must pass through with its length unchanged.
func TestRemoveFlagBytes_WithoutFlags(t *testing.T) {
	payload := []byte{0x02, 0xFF, 0x4C, 0x00} // type 0xFF, not 0x01
	if got := utils.RemoveFlagBytes(payload); len(got) != 4 {
		t.Errorf("Expected unchanged 4 bytes, got %d", len(got))
	}
}

// TestRemoveFlagBytes_TooShort: input shorter than a full AD header is returned
// with its length intact.
func TestRemoveFlagBytes_TooShort(t *testing.T) {
	if got := utils.RemoveFlagBytes([]byte{0x01}); len(got) != 1 {
		t.Errorf("Expected 1 byte, got %d", len(got))
	}
}

func TestCalculateDistance(t *testing.T) {
tests := []struct {
name string
rssi int64
txPower string
}{
{"typical beacon", -65, "C5"}, // -59 in two's complement
{"weak signal", -90, "C5"},
{"strong signal", -40, "C5"},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
adv := model.BeaconAdvertisement{RSSI: tt.rssi, TXPower: tt.txPower}
d := utils.CalculateDistance(adv)
if d < 0 {
t.Errorf("Distance should be non-negative, got %f", d)
}
})
}
}

// TestLoopADStructures_NoParsers: with an empty parser registry nothing can
// match, so the returned event must be the zero value (empty ID).
func TestLoopADStructures_NoParsers(t *testing.T) {
	reg := &model.ParserRegistry{ParserList: make(map[string]model.BeaconParser)}
	payload := []byte{0x02, 0x01, 0x06}

	event := utils.LoopADStructures(payload, utils.ParseADFast(payload), "beacon-1", reg)
	if event.ID != "" {
		t.Errorf("Expected empty event with no parsers, got ID %s", event.ID)
	}
}

Загрузка…
Отмена
Сохранить