Compare commits


270 Commits

Author SHA1 Message Date
Ahmed Bouhuolia
2c05785096 Merge pull request #934 from bigcapitalhq/fix/branches-activation-bills
fix(server): branches activation not marking bills and payments with primary branch
2026-02-05 16:06:39 +02:00
Ahmed Bouhuolia
6af4be9c6c fix(server): branches activation not marking bills and payments with primary branch
When activating the multi-branches feature, existing bills, vendor credits,
and bill payments were not being marked with the default primary branch.

Changes:
- Add missing @Inject decorators to BillActivateBranches, VendorCreditActivateBranches,
  and BillPaymentsActivateBranches services
- Create BillBranchesActivateSubscriber to listen to onActivated event
- Create VendorCreditBranchesActivateSubscriber to listen to onActivated event
- Register BillPaymentsActivateBranches and PaymentMadeActivateBranchesSubscriber
  in BranchesModule
- Add branch object to BillResponseDto for API responses
- Add branch to BillTransformer includeAttributes

Fixes: #935
2026-02-05 16:03:57 +02:00
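
The subscriber pattern this commit describes — a class that reacts to the feature-activation event and back-fills existing records with the primary branch — might look like the following minimal sketch. The class and service names come from the commit message; the `@nestjs/event-emitter` wiring, the event name, and the method signatures are assumptions, since the repository's actual event system is not shown in this log.

```typescript
import { Inject, Injectable } from '@nestjs/common';
import { OnEvent } from '@nestjs/event-emitter';

// Assumed sketch of the service the commit registers in BranchesModule.
@Injectable()
export class BillActivateBranches {
  async activateBills(primaryBranchId: number): Promise<void> {
    // ...update pre-existing bills to reference the primary branch.
  }
}

@Injectable()
export class BillBranchesActivateSubscriber {
  constructor(
    // The missing @Inject decorators were the root cause described above.
    @Inject(BillActivateBranches)
    private readonly billActivateBranches: BillActivateBranches,
  ) {}

  // Marks pre-existing bills with the default primary branch once the
  // multi-branches feature is switched on.
  @OnEvent('branches.onActivated')
  async handleBranchesActivated(payload: { primaryBranchId: number }) {
    await this.billActivateBranches.activateBills(payload.primaryBranchId);
  }
}
```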
Ahmed Bouhuolia
8def1d31d2 Merge pull request #933 from bigcapitalhq/20260205-151219-7770
feat(webapp): add blurry background to sticky data table cells
2026-02-05 15:29:47 +02:00
Ahmed Bouhuolia
afab02a053 feat(webapp): add blurry background to sticky data table cells
Add backdrop-filter blur effect to sticky column cells in financial reports
to prevent content from showing through during horizontal scrolling.
The effect only applies when rows are not hovered to preserve hover
background interactions.
2026-02-05 15:27:45 +02:00
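
A minimal sketch of the styling this commit describes, written as a styled-components snippet for illustration (the library choice, selector structure, and blur radius are assumptions; only the backdrop blur and the not-hovered condition come from the message):

```typescript
import styled from 'styled-components';

// A translucent background plus backdrop blur keeps scrolled content from
// showing through the sticky cell; the :not(:hover) guard preserves the
// row's hover background interaction.
export const StickyCell = styled.td`
  position: sticky;
  left: 0;

  tr:not(:hover) & {
    background: rgba(255, 255, 255, 0.75);
    backdrop-filter: blur(6px);
  }
`;
```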
Ahmed Bouhuolia
8e925c62f2 Merge pull request #932 from bigcapitalhq/20260205-151219-7770
fix(server): balance sheet query validation schema
2026-02-05 15:14:45 +02:00
Ahmed Bouhuolia
1b7d513adf fix(server): balance sheet query validation schema 2026-02-05 15:12:54 +02:00
Ahmed Bouhuolia
7d764fb390 Merge pull request #931 from bigcapitalhq/fix/item-error-handling
fix(items): correct error type handling and add swagger documentation
2026-02-04 21:44:45 +02:00
Ahmed Bouhuolia
c571f50a74 fix(items): correct error type handling and add swagger documentation
- Fix error type mismatch: change 'ITEM.NAME.ALREADY.EXISTS' to 'ITEM_NAME_EXISTS'
- Add ItemErrorType constant with UpperCamelCase keys for better maintainability
- Update all error checks to use the new ItemErrorType constant
- Add ItemErrorResponse.dto.ts with documented error types for swagger
- Add @ApiResponse decorators to document 400 validation errors in swagger
2026-02-04 21:42:39 +02:00
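
The constant-plus-check pattern from this commit might look like the sketch below. The `ItemNameExists` key and `ITEM_NAME_EXISTS` code come from the message; the function shape and response payload are assumptions.

```typescript
import { BadRequestException } from '@nestjs/common';

// UpperCamelCase keys mapping to canonical error codes, so error checks
// and swagger docs reference one shared source of truth.
export const ItemErrorType = {
  ItemNameExists: 'ITEM_NAME_EXISTS',
} as const;

// Checks compare against the constant rather than a loose string literal,
// which is how the 'ITEM.NAME.ALREADY.EXISTS' mismatch slipped in before.
export function rethrowItemError(error: { errorType?: string }): never {
  if (error.errorType === ItemErrorType.ItemNameExists) {
    throw new BadRequestException({
      errors: [{ type: ItemErrorType.ItemNameExists }],
    });
  }
  throw error;
}
```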
Ahmed Bouhuolia
6549026344 Merge pull request #930 from bigcapitalhq/fix/account-delete-error-handling
fix(webapp): account delete error handling response types
2026-02-04 21:31:38 +02:00
Ahmed Bouhuolia
0963394b04 fix(webapp): account delete error handling response types 2026-02-04 21:27:25 +02:00
Ahmed Bouhuolia
6cab0651fc Merge pull request #927 from bigcapitalhq/feature/20260202223150
fix(webapp): darkmode warehouses list page
2026-02-02 22:36:42 +02:00
Ahmed Bouhuolia
4af537d6dd fix(webapp): darkmode warehouses list page 2026-02-02 22:31:53 +02:00
Ahmed Bouhuolia
34db64612c Merge pull request #926 from bigcapitalhq/20260202-185120-9c84
fix(webapp): constrant not found row color
2026-02-02 18:53:48 +02:00
Ahmed Bouhuolia
10225bbfed fix(webapp): constrant not found row color 2026-02-02 18:51:52 +02:00
Ahmed Bouhuolia
c3a4fe6b37 Merge pull request #924 from bigcapitalhq/20260201-180532-f578
fix(webapp): normalize api path
2026-02-01 18:06:51 +02:00
Ahmed Bouhuolia
02be959461 fix(webapp): normalize api path 2026-02-01 18:05:51 +02:00
Ahmed Bouhuolia
d5bf56e333 Merge pull request #923 from bigcapitalhq/20260201-165255-f063
fix(server): copy .js migration files
2026-02-01 16:56:49 +02:00
Ahmed Bouhuolia
e3182c15b3 fix(server): copy .js migration files 2026-02-01 16:53:21 +02:00
Ahmed Bouhuolia
dfa63ece21 Merge pull request #921 from bigcapitalhq/20260131-145158-fd0c
fix(scripts): db migration dockerfile
2026-01-31 15:32:07 +02:00
Ahmed Bouhuolia
6e95bd7da1 fix(scripts): db migration dockerfile 2026-01-31 15:31:17 +02:00
Ahmed Bouhuolia
f51fffa5c7 Merge pull request #918 from bigcapitalhq/20260129-203653-75b0
feat(server): add bull ui board
2026-01-29 20:39:05 +02:00
Ahmed Bouhuolia
6193358cc3 feat(server): add bull ui board 2026-01-29 20:37:04 +02:00
Ahmed Bouhuolia
518abcd30d Merge pull request #917 from bigcapitalhq/20260128-195652-2287
fix: dockerfile build script
2026-01-28 23:42:24 +02:00
Ahmed Bouhuolia
7874b9f765 fix(ci): dockerfile build script 2026-01-28 23:40:32 +02:00
Ahmed Bouhuolia
02cc7e0c96 Merge pull request #916 from bigcapitalhq/20260128-181425-8b6a
fix(webapp): blueprintjs datetime version
2026-01-28 18:17:29 +02:00
Ahmed Bouhuolia
57cc513873 fix(webapp): blueprintjs datetime version 2026-01-28 18:14:44 +02:00
Ahmed Bouhuolia
f5bfdede30 Merge pull request #915 from bigcapitalhq/fix-vendor-customer-edit-opening-balance
fix(webapp): vendor/customer edit opening balance
2026-01-27 22:09:00 +02:00
Ahmed Bouhuolia
488556bb59 fix(webapp): vendor/customer edit opening balance 2026-01-27 22:06:57 +02:00
Ahmed Bouhuolia
0fc5a66e95 Merge pull request #914 from bigcapitalhq/fix-costable-inventory-transactions
fix(server): costable attr of inventory gl entries
2026-01-26 15:02:35 +02:00
Ahmed Bouhuolia
d9ae51027e fix(server): costable attr of inventory gl entries 2026-01-26 15:00:17 +02:00
Ahmed Bouhuolia
a92d6112d9 Merge pull request #913 from bigcapitalhq/feature/20260125222025
fix(server): sale receipt cost gl entries
2026-01-25 22:22:08 +02:00
Ahmed Bouhuolia
889b0cec4b fix(server): sale receipt cost gl entries 2026-01-25 22:20:28 +02:00
Ahmed Bouhuolia
1c4c41ebba Merge pull request #912 from bigcapitalhq/feature/20260125215941
fix(server): mark compute inventory cost flag
2026-01-25 22:02:13 +02:00
Ahmed Bouhuolia
421f0c26a7 fix(server): mark compute inventory cost flag 2026-01-25 21:59:44 +02:00
Ahmed Bouhuolia
f461cc221b Merge pull request #911 from bigcapitalhq/feature/20260125001703
fix(server): landed cost gl transactions
2026-01-25 00:19:07 +02:00
Ahmed Bouhuolia
acae75a912 fix(server): landed cost gl transactions 2026-01-25 00:17:14 +02:00
Ahmed Bouhuolia
b5a69971a9 Merge pull request #910 from bigcapitalhq/feature/20260123174320
fix(server): customer/vendor opening balance
2026-01-24 14:02:17 +02:00
Ahmed Bouhuolia
04d065b969 wip 2026-01-24 13:59:43 +02:00
Ahmed Bouhuolia
ca910ee489 fix(server): customer/vendor opening balance: 2026-01-23 17:43:22 +02:00
Ahmed Bouhuolia
e3cf6bf099 Merge pull request #908 from bigcapitalhq/feature/20260121133953
fix: bill response with entries
2026-01-21 13:40:53 +02:00
Ahmed Bouhuolia
6da7e8185c fix: bill response with entries 2026-01-21 13:39:56 +02:00
Ahmed Bouhuolia
785c49f2e6 Merge pull request #907 from bigcapitalhq/feature/20260121130702
hotbug(server): interceptors order
2026-01-21 13:08:18 +02:00
Ahmed Bouhuolia
d7331554ad hotbug(server): interceptors order 2026-01-21 13:07:03 +02:00
Ahmed Bouhuolia
78b1e9136a Merge pull request #897 from bigcapitalhq/more-e2e-test-cases
feat(server): more e2e test cases
2026-01-18 22:46:12 +02:00
Ahmed Bouhuolia
fea9bb5caa Merge remote-tracking branch 'refs/remotes/origin/more-e2e-test-cases' into more-e2e-test-cases 2026-01-18 22:44:17 +02:00
Ahmed Bouhuolia
db5caa138a wip 2026-01-18 22:43:54 +02:00
Ahmed Bouhuolia
bf821885c0 Merge branch 'develop' into more-e2e-test-cases 2026-01-18 15:01:49 +02:00
Ahmed Bouhuolia
5ce5d8b899 Merge pull request #906 from bigcapitalhq/move-app-filters
fix(server): move global filters, pipes, and interceptors to AppModule
2026-01-18 15:00:58 +02:00
Ahmed Bouhuolia
458093fca2 fix(server): move global filters, pipes, and interceptors to AppModule 2026-01-18 14:59:20 +02:00
Ahmed Bouhuolia
97e17848f8 Merge pull request #905 from bigcapitalhq/pagination-darkmode
fix(webapp): pagination darkmode
2026-01-17 23:35:36 +02:00
Ahmed Bouhuolia
3dfe884413 fix(webapp): pagination darkmode 2026-01-17 23:33:10 +02:00
Ahmed Bouhuolia
f26a59f0fb Merge pull request #904 from bigcapitalhq/fix-landed-cost-dialog
fix: landed cost dialog
2026-01-17 21:45:23 +02:00
Ahmed Bouhuolia
7ee161733f fix: landed cost dialog 2026-01-17 21:42:27 +02:00
Ahmed Bouhuolia
4efc0b3eb4 Merge pull request #903 from bigcapitalhq/fix-cancel-invoice-written-off
fix(webapp): cancel the written-off invoice
2026-01-16 19:10:26 +02:00
Ahmed Bouhuolia
532aa07e7f fix(webapp): cancel the written-off invoice 2026-01-16 19:08:07 +02:00
Ahmed Bouhuolia
abacb543c7 Merge pull request #902 from bigcapitalhq/fix-bank-transactions-unexclude2
fix(webapp): unexclude bank transactions
2026-01-16 18:54:36 +02:00
Ahmed Bouhuolia
769eaebc76 fix(webapp): unexclude bank transactions 2026-01-16 18:52:12 +02:00
Ahmed Bouhuolia
e0fb345a48 fix: improve banking transaction exclude/unexclude logic 2026-01-16 18:49:27 +02:00
Ahmed Bouhuolia
c21301061f wip 2026-01-16 00:23:16 +02:00
Ahmed Bouhuolia
2bbc154f18 wip 2026-01-15 22:04:51 +02:00
Ahmed Bouhuolia
3c1273becb wip 2026-01-12 01:04:28 +02:00
Ahmed Bouhuolia
16f1d57279 feat(server): more e2e test cases 2026-01-10 01:01:41 +02:00
Ahmed Bouhuolia
8726b4b3b0 Merge pull request #896 from bigcapitalhq/fix-server-build
fix(server): Dockerfile
2026-01-09 23:40:19 +02:00
Ahmed Bouhuolia
5ace03ea99 fix(server): Dockerfile 2026-01-09 23:38:52 +02:00
Ahmed Bouhuolia
5b6c473780 Merge pull request #895 from bigcapitalhq/fix-bank-accounts-filter
fix(server): bank accounts filter
2026-01-09 20:02:36 +02:00
Ahmed Bouhuolia
2186828516 fix(server): bank accounts filter 2026-01-09 20:00:44 +02:00
Ahmed Bouhuolia
3f2ab6e8f0 feat(webapp): add socket to vite server proxy 2026-01-08 22:43:42 +02:00
Ahmed Bouhuolia
f0fae7d148 Merge pull request #894 from bigcapitalhq/fix-refund-credit-notes
fix(server): refund credit note gl entries
2026-01-08 00:29:47 +02:00
Ahmed Bouhuolia
e063597a80 fix(server): refund credit note gl entries 2026-01-08 00:27:43 +02:00
Ahmed Bouhuolia
9b3f6b22d1 Merge pull request #893 from bigcapitalhq/bugs-bashing-3
fix: bugs bashing
2026-01-04 01:27:19 +02:00
Ahmed Bouhuolia
0475ce136a fix: bugs bashing
- Added English translations for customer types in `customer.json`.
- Updated `Model.ts` to improve deletion logic by filtering dependent relations.
- Introduced `BillPaymentBillSyncSubscriber` to handle bill payment events.
- Enhanced `CreateBillPaymentService` and `EditBillPaymentService` to fetch entries after insertion/updating.
- Updated `SaleInvoiceCostGLEntries` to include item entry details in GL entries.
- Refactored various components in the webapp for consistency in naming conventions.
2026-01-04 01:24:10 +02:00
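
The `Model.ts` deletion-logic change might resemble this sketch, assuming the Objection.js ORM; the method name echoes the later `deleteIfNoRelations` commit in this log, and the relation list and error shape are assumptions.

```typescript
import { Model } from 'objection';

export class BaseModel extends Model {
  // Deletes the row only when none of the given dependent relations have
  // records, preventing orphaned references.
  async deleteIfNoRelations(relations: string[]): Promise<number> {
    for (const relation of relations) {
      const count = await this.$relatedQuery(relation).resultSize();
      if (count > 0) {
        throw new Error(`Cannot delete: dependent '${relation}' records exist.`);
      }
    }
    return this.$query().delete();
  }
}
```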
Ahmed Bouhuolia
987ad992a4 Merge pull request #892 from bigcapitalhq/darkmode-ui-bugs
fix: darkmode ui bugs
2026-01-03 18:26:21 +02:00
Ahmed Bouhuolia
ee92c2815b fix: darkmode ui bugs 2026-01-03 18:24:33 +02:00
Ahmed Bouhuolia
5767f1f603 Merge pull request #890 from bigcapitalhq/named-imports-hocs
fix: account transactions don't show up
2026-01-01 22:16:02 +02:00
Ahmed Bouhuolia
885d8014c2 fix: account transactions don't show up 2026-01-01 22:13:47 +02:00
Ahmed Bouhuolia
3ffab896ed Merge pull request #889 from bigcapitalhq/revert-888-named-imports-hocs
Revert "fix: account transactions don't show up"
2026-01-01 22:13:31 +02:00
Ahmed Bouhuolia
92a5086f1f Revert "fix: account transactions don't show up" 2026-01-01 22:13:08 +02:00
Ahmed Bouhuolia
1bf9038ddc Merge pull request #888 from bigcapitalhq/named-imports-hocs
fix: account transactions don't show up
2026-01-01 22:11:56 +02:00
Ahmed Bouhuolia
2736b76ced fix: account transactions don't show up 2026-01-01 22:09:51 +02:00
Ahmed Bouhuolia
9e921b074f Merge pull request #887 from bigcapitalhq/named-imports-hocs
refactor: HOCs named imports
2026-01-01 22:00:58 +02:00
Ahmed Bouhuolia
0f377e19f3 refactor: HOCs named imports 2026-01-01 21:58:42 +02:00
Ahmed Bouhuolia
5d872798ff Merge pull request #886 from bigcapitalhq/fix-credit-note-print
fix: credit note printing
2026-01-01 17:21:36 +02:00
Ahmed Bouhuolia
0ef78a19fe fix: credit note printing 2026-01-01 17:19:06 +02:00
Ahmed Bouhuolia
70b0a4833c Merge pull request #885 from bigcapitalhq/refund-credit-notes
fix: refund credit notes
2026-01-01 17:05:36 +02:00
Ahmed Bouhuolia
ead4fc9b97 fix: refund credit notes 2026-01-01 17:03:48 +02:00
Ahmed Bouhuolia
a91a7c612f Merge pull request #882 from bigcapitalhq/bugs-bashing2
Bug fixes, refactoring, and improvements
2025-12-31 01:01:08 +02:00
Ahmed Bouhuolia
339289be9f refactor(export): move PDF table template to shared package 2025-12-29 23:54:43 +02:00
Ahmed Bouhuolia
350d229e98 feat(transactions-locking): enable settings schema and add dark mode support 2025-12-29 23:35:34 +02:00
Ahmed Bouhuolia
8152a16fd5 Merge pull request #881 from bigcapitalhq/bugs-bashing
bugs bashing
2025-12-29 22:08:56 +02:00
Ahmed Bouhuolia
00aad6e35c wip 2025-12-29 22:06:49 +02:00
Ahmed Bouhuolia
30d8fdb4c0 fix: running compute item cost processor 2025-12-28 12:30:06 +02:00
Ahmed Bouhuolia
872fc661ce bugs bashing 2025-12-28 12:01:24 +02:00
Ahmed Bouhuolia
054cd1fae4 Merge pull request #880 from bigcapitalhq/fix-dark-mode-bank-transaction-drawer
fix: darkmode bank transaction drawer
2025-12-23 20:00:50 +02:00
Ahmed Bouhuolia
7cb169bce9 fix: darkmode bank transaction drawer 2025-12-23 19:57:31 +02:00
Ahmed Bouhuolia
f2663c4af3 Merge pull request #879 from bigcapitalhq/refactor-bound-formik-fields
refactor(webapp): bound Formik fields
2025-12-22 23:28:17 +02:00
Ahmed Bouhuolia
6fea7779da refactor(webapp): bound Formik fields 2025-12-22 23:25:43 +02:00
Ahmed Bouhuolia
c00af18327 Merge pull request #878 from bigcapitalhq/fix-match-bank-transactions
fix: match uncategorized bank transactions
2025-12-22 23:06:36 +02:00
Ahmed Bouhuolia
37f0f4e227 fix: match uncategorized bank transactions 2025-12-22 23:02:08 +02:00
Ahmed Bouhuolia
8662c5899e Merge pull request #877 from bigcapitalhq/fix-import-bank-transactions
fix: import bank transactions
2025-12-22 22:52:34 +02:00
Ahmed Bouhuolia
a9a7cd8617 fix: import bank transactions 2025-12-22 22:49:58 +02:00
Ahmed Bouhuolia
e50fc3b523 Merge pull request #876 from bigcapitalhq/refactor-date-input
refactor: date input field
2025-12-21 23:39:09 +02:00
Ahmed Bouhuolia
b294a72a26 refactor: date input field 2025-12-21 23:34:11 +02:00
Ahmed Bouhuolia
62ae49941b Merge pull request #875 from bigcapitalhq/fix-accounts-suggest-field
fix: accounts suggest field
2025-12-21 16:15:39 +02:00
Ahmed Bouhuolia
31f5cbf335 fix: accounts suggest field 2025-12-21 16:11:01 +02:00
Ahmed Bouhuolia
b22328cff9 Merge pull request #874 from bigcapitalhq/feature/20251218134811
fix: import module bugs
2025-12-18 21:25:34 +02:00
Ahmed Bouhuolia
58f609353c fix: import bugs 2025-12-18 21:21:54 +02:00
Ahmed Bouhuolia
8a2a8eed3b fix: import rows aggregator 2025-12-18 20:44:05 +02:00
Ahmed Bouhuolia
636d206b0e fix: bugs sprint 2025-12-18 13:48:12 +02:00
Ahmed Bouhuolia
63922c391a fix: formatted money attributes 2025-12-14 16:51:06 +02:00
Ahmed Bouhuolia
6ecfe1ff12 fix: remove the auth body background 2025-12-14 14:30:25 +02:00
Ahmed Bouhuolia
17651e0768 Merge pull request #871 from bigcapitalhq/fix-payment-link-base-url
fix: generated payment link base url
2025-12-14 13:29:09 +02:00
Ahmed Bouhuolia
151b623771 fix: generated payment link base url 2025-12-14 13:26:34 +02:00
Ahmed Bouhuolia
2d4459c2f9 fix: payment portal page 2025-12-14 13:06:44 +02:00
Ahmed Bouhuolia
3cbdc3ec96 Merge pull request #870 from bigcapitalhq/report-pdf-template
fix: reports pdf template
2025-12-12 23:42:02 +02:00
Ahmed Bouhuolia
3cfb5cdde8 fix: reports pdf template 2025-12-12 23:38:48 +02:00
Ahmed Bouhuolia
736f2c4109 Merge pull request #869 from bigcapitalhq/fix-passing-number-format-to-reports
fix: passing number format to reports
2025-12-11 00:25:57 +02:00
Ahmed Bouhuolia
2e21437056 fix: update pnpm-lock.yaml 2025-12-11 00:23:50 +02:00
Ahmed Bouhuolia
340b78d968 fix: passing number format to reports 2025-12-11 00:19:55 +02:00
Ahmed Bouhuolia
d006362be2 fix: transaction locking handling 2025-12-05 23:47:29 +02:00
Ahmed Bouhuolia
bc21dcb37e fix(webapp): add api key button 2025-12-05 15:14:31 +02:00
Ahmed Bouhuolia
578b0deb3e fix: sending mail jobs (#868) 2025-12-05 00:09:11 +02:00
Ahmed Bouhuolia
c3dc26a1e4 fix: sending mail jobs 2025-12-05 00:07:26 +02:00
Ahmed Bouhuolia
32d74b0413 feat: onboarding pages darkmode (#867) 2025-12-03 16:04:46 +02:00
allcontributors[bot]
71b1206f8a docs: add Daniel15 as a contributor for bug, and code (#865)
* docs: update README.md [skip ci]

* docs: update .all-contributorsrc [skip ci]

---------

Co-authored-by: allcontributors[bot] <46447321+allcontributors[bot]@users.noreply.github.com>
2025-12-02 01:42:54 +02:00
Ahmed Bouhuolia
cb1bcaae77 Merge pull request #864 from Daniel15/patch-3
fix: Stripe integration
2025-12-02 01:41:04 +02:00
Ahmed Bouhuolia
eb51646035 fix: stripe payment webhooks 2025-12-02 01:26:58 +02:00
Ahmed Bouhuolia
8f54754aba feat: add stripe payment webhooks controller 2025-12-01 13:24:19 +02:00
Daniel Lo Nigro
0f446f90ca Change stripe_checkout_session to POST 2025-11-30 16:01:04 -08:00
Daniel Lo Nigro
7cb67b257b Cast payment_integration_id to number 2025-11-30 15:59:29 -08:00
Daniel Lo Nigro
0a1fffb3a4 Correctly register PaymentIntegration as tenant model 2025-11-30 15:52:07 -08:00
Daniel Lo Nigro
b756f090ed Fix Stripe redirect_uri 2025-11-30 15:50:42 -08:00
Daniel Lo Nigro
f9e49727fc Fix Stripe API URLs in webapp 2025-11-30 15:35:27 -08:00
Ahmed Bouhuolia
66969753b1 fix: seeds file-system directory 2025-11-30 22:59:48 +02:00
Ahmed Bouhuolia
3648fb3ffc fix: cloud subscription flag 2025-11-30 22:38:00 +02:00
Ahmed Bouhuolia
e196d485cf fix: filter pdf templates by resource 2025-11-26 22:25:42 +02:00
Ahmed Bouhuolia
74e46364ac feat: theme preloading and dark mode 2025-11-26 21:27:42 +02:00
Ahmed Bouhuolia
8817be4813 Merge remote-tracking branch 'refs/remotes/origin/develop' into develop 2025-11-25 23:46:51 +02:00
Ahmed Bouhuolia
cd4816aa3b fix: printing sale receipts 2025-11-25 23:46:41 +02:00
Ahmed Bouhuolia
82a2c74182 Merge pull request #859 from bigcapitalhq/rate-quantity-must-be-required
fix: rate and quantity of entries must not be empty
2025-11-25 22:03:25 +02:00
Ahmed Bouhuolia
e231efb9de fix: rate and quantity of entries must not be empty 2025-11-25 22:02:09 +02:00
Ahmed Bouhuolia
65ffc31ec0 Merge pull request #858 from bigcapitalhq/migrate-from-cra-to-vite
feat: migrate from CRA to Vite for speed
2025-11-25 21:36:28 +02:00
Ahmed Bouhuolia
dc6cf13a3e fix: wdyr error with vite 2025-11-25 21:34:17 +02:00
Ahmed Bouhuolia
adfa8852db wip 2025-11-25 21:29:32 +02:00
Ahmed Bouhuolia
ff04c4b762 wip 2025-11-24 18:58:50 +02:00
Ahmed Bouhuolia
fe4bd88f9f wip 2025-11-24 14:58:58 +02:00
Ahmed Bouhuolia
caf232d928 feat: migrate from CRA to Vite for speed 2025-11-24 14:19:05 +02:00
Ahmed Bouhuolia
234b1804b3 Merge pull request #855 from Daniel15/Daniel15-patch-1
Update commands in contributing docs
2025-11-21 11:25:29 +02:00
Daniel Lo Nigro
98b3b551c1 Update commands in contributing docs 2025-11-20 21:08:40 -08:00
Ahmed Bouhuolia
ceed9e453f feat: bulk transcations delete (#844)
* feat: bulk transcations delete
2025-11-20 23:11:06 +02:00
Ahmed Bouhuolia
43faa45dcf wip 2025-11-20 23:06:35 +02:00
Ahmed Bouhuolia
56e00d254b wip 2025-11-20 17:41:16 +02:00
Ahmed Bouhuolia
d90b6ffbe7 wip 2025-11-19 23:42:06 +02:00
Ahmed Bouhuolia
5eafd23bf8 wip 2025-11-19 22:59:30 +02:00
Ahmed Bouhuolia
2b384b2f6f wip 2025-11-19 22:59:21 +02:00
Ahmed Bouhuolia
17bcc14231 wip 2025-11-17 22:26:33 +02:00
Ahmed Bouhuolia
2c64e1b8ab wip 2025-11-17 17:04:25 +02:00
Daniel Lo Nigro
6f50138260 Improve Stripe example (#851) 2025-11-17 14:23:51 +02:00
Andres Maqueo
0a7d687f91 fix: docker/redis/Dockerfile to reduce vulnerabilities (#845)
The following vulnerabilities are fixed with an upgrade:
- https://snyk.io/vuln/SNYK-DEBIAN10-SYSTEMD-3339153
- https://snyk.io/vuln/SNYK-DEBIAN10-SYSTEMD-3339153
- https://snyk.io/vuln/SNYK-DEBIAN10-OPENSSL-2426310
- https://snyk.io/vuln/SNYK-DEBIAN10-OPENSSL-2807585
- https://snyk.io/vuln/SNYK-DEBIAN10-OPENSSL-1569403

Co-authored-by: snyk-bot <snyk-bot@snyk.io>
2025-11-17 14:22:00 +02:00
Ahmed Bouhuolia
2383091b6e wip 2025-11-12 21:34:30 +02:00
Ahmed Bouhuolia
e2f5d4c66e Merge pull request #848 from Daniel15/patch-1
[docker] Change BANKING_CONNECT to BANK_FEED_ENABLED
2025-11-12 09:31:08 +02:00
Daniel Lo Nigro
ce70234ebd [docker] Change BANKING_CONNECT to BANK_FEED_ENABLED 2025-11-11 22:57:42 -08:00
Ahmed Bouhuolia
80abd1f66f fix: edit/create account 2025-11-07 22:20:06 +02:00
Ahmed Bouhuolia
a0bc9db9a6 feat: bulk transcations delete 2025-11-03 21:40:24 +02:00
Ahmed Bouhuolia
8161439365 Merge pull request #843 from bigcapitalhq/refactor-fast-fields
feat: refactor FastField fields to binded Formik fields
2025-11-03 00:29:26 +02:00
Ahmed Bouhuolia
46871c8113 feat: refactor FastField fields to binded Formik fields 2025-11-03 00:27:32 +02:00
Ahmed Bouhuolia
a4aee58f93 Merge pull request #842 from bigcapitalhq/fix-auto-increment-transactions
fix: auto increment serial transactions
2025-11-02 21:10:04 +02:00
Ahmed Bouhuolia
f64875404a fix: auto increment serial transactions 2025-11-02 21:08:28 +02:00
Ahmed Bouhuolia
cca116f6bb Merge pull request #841 from bigcapitalhq/only-inactive-accounts-filter
fix: only inactive accounts filter
2025-11-02 20:00:14 +02:00
Ahmed Bouhuolia
fdec94a3f7 fix: only inactive accounts filter 2025-11-02 19:58:26 +02:00
Ahmed Bouhuolia
c66299aacd feat: darkmode preferences screens (#840) 2025-11-02 17:01:52 +02:00
Ahmed Bouhuolia
77dc0778a3 feat: darkmode preferences screens 2025-11-02 16:43:47 +02:00
Ahmed Bouhuolia
a76445a6eb feat: api keys ui (#839)
* feat: api keys ui
2025-11-02 12:41:16 +02:00
Ahmed Bouhuolia
41143d8bbd feat: api endpoints throttle (#837)
* feat: api endpoints throttle
2025-10-30 22:06:05 +02:00
Ahmed Bouhuolia
844a050c9a Merge pull request #836 from bigcapitalhq/auth-pages-errors-handler
fix: auth pages errors handler
2025-10-30 19:29:41 +02:00
Ahmed Bouhuolia
0588a30c88 fix: auth pages errors handler 2025-10-30 19:27:29 +02:00
Ahmed Bouhuolia
4a0091d3f8 Merge pull request #835 from bigcapitalhq/fix-edit-payment-transaction
fix: edit payment transaction
2025-10-29 12:57:24 +02:00
Ahmed Bouhuolia
fc89cfb14a fix: edit payment transaction 2025-10-29 12:54:12 +02:00
Ahmed Bouhuolia
98401b5a01 Merge pull request #438 from bigcapitalhq/BIG-166
feat: one-command setup script
2025-10-29 00:39:45 +02:00
Ahmed Bouhuolia
3afe4f470f Update setup.sh
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-10-29 00:39:26 +02:00
Ahmed Bouhuolia
b4281a71d4 Apply suggestion from @Copilot
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-10-29 00:39:15 +02:00
Ahmed Bouhuolia
87c127fabd Merge branch 'develop' into BIG-166 2025-10-29 00:35:33 +02:00
Ahmed Bouhuolia
50d9e8d375 Merge pull request #833 from bigcapitalhq/fix-more-bugs
fix: issues related to PUT operations
2025-10-28 18:14:00 +02:00
Ahmed Bouhuolia
4839a6dea8 Update packages/server/src/modules/Bills/commands/EditBill.service.ts
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-10-28 18:13:52 +02:00
Ahmed Bouhuolia
f736c3f9eb fix: issues related to PUT operations 2025-10-28 18:12:08 +02:00
Ahmed Bouhuolia
368c85a01a Merge pull request #832 from bigcapitalhq/validate-tenant-existance-in-guards
fix: validate request org id existance in guards
2025-10-25 15:20:42 +02:00
Ahmed Bouhuolia
5d792fea26 fix 2025-10-25 15:19:10 +02:00
Ahmed Bouhuolia
1bccba572a fix: validate request org id existance in guards 2025-10-25 15:15:13 +02:00
Ahmed Bouhuolia
900921e6ba Merge pull request #831 from bigcapitalhq/fix-tenant-build
fix: organization build db connection error
2025-10-25 14:59:28 +02:00
Ahmed Bouhuolia
2b4772a070 fix: organization build db connection error 2025-10-25 14:57:38 +02:00
Ahmed Bouhuolia
8852a4a0f8 Merge pull request #830 from bigcapitalhq/fix-system-tenant-migration
fix: seed migration issue
2025-10-25 14:02:19 +02:00
Ahmed Bouhuolia
1971b2ddc0 fix: seed migration issue 2025-10-25 13:58:56 +02:00
Ahmed Bouhuolia
fb6bfdee8e feat: optimize status tags (#829) 2025-10-23 12:42:16 +02:00
Ahmed Bouhuolia
5c466464a2 feat: optimize status tags 2025-10-23 12:40:08 +02:00
Ahmed Bouhuolia
3bd0e89146 feat: migration commands (#828)
* feat: migration commands

* Update packages/server/src/modules/CLI/commands/TenantsMigrateRollback.command.ts

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* Update packages/server/src/modules/CLI/commands/TenantsMigrateLatest.command.ts

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* Update packages/server/src/modules/CLI/commands/TenantsList.command.ts

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* Update packages/server/src/modules/CLI/commands/SystemMigrateRollback.command.ts

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* Update packages/server/src/modules/CLI/commands/TenantsMigrateLatest.command.ts

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

---------

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-10-22 21:58:02 +02:00
Ahmed Bouhuolia
9d714ac78e fix: invoice details action bar divider 2025-10-21 00:33:58 +02:00
Ahmed Bouhuolia
0477133cda feat: darkmode skeleton and universal search 2025-10-21 00:14:31 +02:00
Ahmed Bouhuolia
ce01b8645b feat: init speckit for cursor 2025-10-18 21:38:21 +02:00
Ahmed Bouhuolia
7be2c2f705 Merge pull request #826 from bigcapitalhq/typecheck-github-action
feat: typecheck gh action
2025-10-18 19:15:08 +02:00
Ahmed Bouhuolia
8fd2ccc268 chore: update typecheck workflow to include shared packages and build step 2025-10-18 19:13:15 +02:00
Ahmed Bouhuolia
7a3a6fa28b fix: update import paths for case sensitivity 2025-10-18 19:10:05 +02:00
Ahmed Bouhuolia
e686fba695 fix: case sensitivity paths 2025-10-18 19:04:29 +02:00
Ahmed Bouhuolia
3f49d82dbe fix 2025-10-18 18:52:57 +02:00
Ahmed Bouhuolia
e548e7dc4a feat: typecheck gh action 2025-10-18 18:48:03 +02:00
Ahmed Bouhuolia
e4f51fb8a1 Merge pull request #816 from bigcapitalhq/darkmode
feat: Darkmode
2025-10-18 18:40:53 +02:00
Ahmed Bouhuolia
aff8155bd6 wip 2025-10-18 18:12:26 +02:00
Ahmed Bouhuolia
40395274d0 wip 2025-10-18 17:25:57 +02:00
Ahmed Bouhuolia
c1c8097c92 feat: sidebar items darkmode 2025-10-18 16:04:49 +02:00
Ahmed Bouhuolia
bf573bd633 Merge pull request #825 from bigcapitalhq/socket-connec
Socket connec
2025-10-18 13:32:52 +02:00
Ahmed Bouhuolia
803e3980d3 chore: update pnpm-lock.yaml to include new @nestjs/websockets and @nestjs/platform-socket.io versions, and remove CORS configuration from main.ts 2025-10-18 13:31:14 +02:00
Ahmed Bouhuolia
dbc71c2555 feat(server): socket module 2025-10-18 13:27:43 +02:00
Ahmed Bouhuolia
54400b223f wip 2025-10-18 13:27:05 +02:00
Ahmed Bouhuolia
dd941f1f45 wip 2025-09-07 23:47:07 +02:00
Ahmed Bouhuolia
3537de765d wip 2025-09-07 13:56:11 +02:00
Ahmed Bouhuolia
9a89d90f6e wip 2025-08-31 17:18:50 +02:00
Ahmed Bouhuolia
eebe98f43b wip 2025-08-06 16:15:28 +02:00
Ahmed Bouhuolia
ffff3a6872 wip 2025-08-06 16:13:24 +02:00
Ahmed Bouhuolia
6393f30735 wp 2025-08-04 13:41:14 +02:00
Ahmed Bouhuolia
d9a716a46f wip darkmode 2025-08-04 12:25:27 +02:00
Ahmed Bouhuolia
cd1a70ca94 feat: api key dto docs 2025-07-06 10:44:32 +02:00
Ahmed Bouhuolia
b332406218 Update README.md 2025-07-02 23:31:40 +02:00
Ahmed Bouhuolia
456a9e1ad9 feat: add header swagger docs 2025-07-02 17:42:17 +02:00
Ahmed Bouhuolia
b2d61160dd Merge branch 'api-keys' into develop 2025-07-02 08:31:21 +02:00
Ahmed Bouhuolia
adb1bea374 feat: use the same Authorization header for jwt and api key 2025-07-02 08:30:53 +02:00
Ahmed Bouhuolia
e2d5dcd489 Merge pull request #813 from bigcapitalhq/api-keys
feat: api keys
2025-07-01 23:49:43 +02:00
Ahmed Bouhuolia
5d96357042 feat: clean up items controller 2025-07-01 23:48:56 +02:00
Ahmed Bouhuolia
9457b3cda1 feat: api keys 2025-07-01 23:45:38 +02:00
Ahmed Bouhuolia
84cb7693c8 feat: api keys 2025-07-01 23:05:58 +02:00
Ahmed Bouhuolia
9f6e9e85a5 feat(server): endpoints swagger docs 2025-06-30 16:30:55 +02:00
Ahmed Bouhuolia
83e698acf3 fix:create customer/vendor 2025-06-29 16:55:02 +02:00
Ahmed Bouhuolia
fa5c3bd955 feat: deleteIfNoRelations 2025-06-28 22:35:29 +02:00
Ahmed Bouhuolia
0ca98c7ae4 fix: cycle dependecy 2025-06-27 02:18:01 +02:00
Ahmed Bouhuolia
0c0e1dc22e fix: invoice generate sharable link 2025-06-27 01:59:46 +02:00
Ahmed Bouhuolia
e7178a6575 fix: adjust contact balance 2025-06-26 17:04:46 +02:00
Ahmed Bouhuolia
6a39e9d71f feat: endpoints swagger document 2025-06-22 23:46:39 +02:00
Ahmed Bouhuolia
9aa1ed93ca feat: update endpoint swagger docs 2025-06-22 20:58:53 +02:00
Ahmed Bouhuolia
b8c9919799 fox: journal sheet 2025-06-21 21:10:05 +02:00
Ahmed Bouhuolia
e5701140e1 feat: swagger doc 2025-06-21 20:55:32 +02:00
Ahmed Bouhuolia
91976842a7 fix: AR/AP aging report 2025-06-21 20:15:42 +02:00
Ahmed Bouhuolia
4d52059dba feat: swagger document endpoints 2025-06-19 21:04:54 +02:00
Ahmed Bouhuolia
26c1f118c1 feat: more response docs 2025-06-19 00:49:43 +02:00
Ahmed Bouhuolia
437bcb8854 feat: models default views 2025-06-17 20:53:13 +02:00
Ahmed Bouhuolia
f624cf7ae6 feat: document more endpoints 2025-06-16 23:40:12 +02:00
Ahmed Bouhuolia
e057b4e2f0 feat: add swagger docs 2025-06-16 15:53:00 +02:00
Ahmed Bouhuolia
c4668d7d22 feat: add swagger docs for responses 2025-06-16 13:50:30 +02:00
Ahmed Bouhuolia
88ef60ef28 fix: delete inventory adjustment gl entries 2025-06-15 17:51:44 +02:00
Ahmed Bouhuolia
bbf9ef9bc2 fix: formatted transaction type 2025-06-15 15:22:19 +02:00
Ahmed Bouhuolia
bcae2dae03 feat: change the controllers tags 2025-06-13 01:57:53 +02:00
Ahmed Bouhuolia
ff93168d72 refactor(nestjs): landed cost 2025-06-11 14:04:37 +02:00
Ahmed Bouhuolia
1130975efd refactor(nestjs): landed cost 2025-06-10 17:08:32 +02:00
Ahmed Bouhuolia
fa180b3ac5 refactor: gl entries 2025-06-10 12:29:46 +02:00
Ahmed Bouhuolia
90d6bea9b9 fix: mail state 2025-06-09 15:37:20 +02:00
Ahmed Bouhuolia
4366bf478a refactor: mail templates 2025-06-08 16:49:03 +02:00
Ahmed Bouhuolia
0a57b6e20e fix: cashflow statement localization 2025-06-06 20:40:56 +02:00
Ahmed Bouhuolia
9a685ffe5d refactor: financial reports query dtos 2025-06-06 00:11:51 +02:00
Ahmed Bouhuolia
51988dba3b refactor(nestjs): bank transactions matching 2025-06-05 14:41:26 +02:00
Ahmed Bouhuolia
f87bd341e9 refactor(nestjs): banking modules 2025-06-03 21:42:09 +02:00
Ahmed Bouhuolia
5595478e19 refactor(nestjs): banking module 2025-06-02 21:32:53 +02:00
Ahmed Bouhuolia
7247b52fe5 refactor(nestjs): banking module 2025-06-02 15:41:41 +02:00
Ahmed Bouhuolia
deadd5ac80 refactor(nestjs): plaid banking syncing 2025-06-01 18:38:44 +02:00
Ahmed Bouhuolia
66a2261e50 refactor(nestjs): wip 2025-05-28 21:32:48 +02:00
Ahmed Bouhuolia
c51347d3ec refactor(nestjs): wip import module 2025-05-28 17:01:46 +02:00
Ahmed Bouhuolia
b7a3c42074 refactor(nestjs): wip 2025-05-27 15:42:27 +02:00
Ahmed Bouhuolia
83c9392b74 refactor(nestjs): wip dtos validation schema 2025-05-26 17:04:53 +02:00
Ahmed Bouhuolia
24bf3dd06d refactor(nestjs): validation schema dtos 2025-05-25 23:39:54 +02:00
Ahmed Bouhuolia
2b3f98d8fe refactor(nestjs): hook the new endpoints 2025-05-22 19:55:55 +02:00
Ahmed Bouhuolia
4e64a9eadb refactor(nestjs): pdf templates 2025-05-22 13:36:10 +02:00
Ahmed Bouhuolia
0823bfc4e9 refactor(nestjs): contacts module 2025-05-20 23:55:39 +02:00
Ahmed Bouhuolia
99fe5a6b0d refactor(nestjs): Implement users module 2025-05-20 17:55:58 +02:00
Ahmed Bouhuolia
8e2cd98689 fix: the menu labels 2024-05-12 18:32:19 +02:00
Ahmed Bouhuolia
f934797929 feat: one-command setup script 2024-05-12 18:07:38 +02:00
2228 changed files with 53037 additions and 20782 deletions


@@ -168,6 +168,16 @@
"contributions": [
"bug"
]
},
{
"login": "Daniel15",
"name": "Daniel Lo Nigro",
"avatar_url": "https://avatars.githubusercontent.com/u/91933?v=4",
"profile": "https://d.sb/",
"contributions": [
"bug",
"code"
]
}
],
"contributorsPerLine": 7,


@@ -0,0 +1,184 @@
---
description: Perform a non-destructive cross-artifact consistency and quality analysis across spec.md, plan.md, and tasks.md after task generation.
---
## User Input
```text
$ARGUMENTS
```
You **MUST** consider the user input before proceeding (if not empty).
## Goal
Identify inconsistencies, duplications, ambiguities, and underspecified items across the three core artifacts (`spec.md`, `plan.md`, `tasks.md`) before implementation. This command MUST run only after `/tasks` has successfully produced a complete `tasks.md`.
## Operating Constraints
**STRICTLY READ-ONLY**: Do **not** modify any files. Output a structured analysis report. Offer an optional remediation plan (user must explicitly approve before any follow-up editing commands would be invoked manually).
**Constitution Authority**: The project constitution (`.specify/memory/constitution.md`) is **non-negotiable** within this analysis scope. Constitution conflicts are automatically CRITICAL and require adjustment of the spec, plan, or tasks—not dilution, reinterpretation, or silent ignoring of the principle. If a principle itself needs to change, that must occur in a separate, explicit constitution update outside `/analyze`.
## Execution Steps
### 1. Initialize Analysis Context
Run `.specify/scripts/bash/check-prerequisites.sh --json --require-tasks --include-tasks` once from repo root and parse JSON for FEATURE_DIR and AVAILABLE_DOCS. Derive absolute paths:
- SPEC = FEATURE_DIR/spec.md
- PLAN = FEATURE_DIR/plan.md
- TASKS = FEATURE_DIR/tasks.md
Abort with an error message if any required file is missing (instruct the user to run missing prerequisite command).
For single quotes in args like "I'm Groot", use escape syntax: e.g. 'I'\''m Groot' (or double-quote if possible: "I'm Groot").
### 2. Load Artifacts (Progressive Disclosure)
Load only the minimal necessary context from each artifact:
**From spec.md:**
- Overview/Context
- Functional Requirements
- Non-Functional Requirements
- User Stories
- Edge Cases (if present)
**From plan.md:**
- Architecture/stack choices
- Data Model references
- Phases
- Technical constraints
**From tasks.md:**
- Task IDs
- Descriptions
- Phase grouping
- Parallel markers [P]
- Referenced file paths
**From constitution:**
- Load `.specify/memory/constitution.md` for principle validation
### 3. Build Semantic Models
Create internal representations (do not include raw artifacts in output):
- **Requirements inventory**: Each functional + non-functional requirement with a stable key (derive slug based on imperative phrase; e.g., "User can upload file" → `user-can-upload-file`; see the sketch after this list)
- **User story/action inventory**: Discrete user actions with acceptance criteria
- **Task coverage mapping**: Map each task to one or more requirements or stories (inference by keyword / explicit reference patterns like IDs or key phrases)
- **Constitution rule set**: Extract principle names and MUST/SHOULD normative statements
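A minimal sketch of the stable-key derivation mentioned in the requirements-inventory item above (any normalization beyond lower-casing and hyphenation is an assumption):
```typescript
// Lower-case the imperative phrase and collapse any run of
// non-alphanumeric characters into a single hyphen.
function requirementKey(phrase: string): string {
  return phrase
    .toLowerCase()
    .replace(/[^a-z0-9]+/g, '-')
    .replace(/^-+|-+$/g, '');
}

// requirementKey('User can upload file') === 'user-can-upload-file'
```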
### 4. Detection Passes (Token-Efficient Analysis)
Focus on high-signal findings. Limit to 50 findings total; aggregate remainder in overflow summary.
#### A. Duplication Detection
- Identify near-duplicate requirements
- Mark lower-quality phrasing for consolidation
#### B. Ambiguity Detection
- Flag vague adjectives (fast, scalable, secure, intuitive, robust) lacking measurable criteria
- Flag unresolved placeholders (TODO, TKTK, ???, `<placeholder>`, etc.)
#### C. Underspecification
- Requirements with verbs but missing object or measurable outcome
- User stories missing acceptance criteria alignment
- Tasks referencing files or components not defined in spec/plan
#### D. Constitution Alignment
- Any requirement or plan element conflicting with a MUST principle
- Missing mandated sections or quality gates from constitution
#### E. Coverage Gaps
- Requirements with zero associated tasks
- Tasks with no mapped requirement/story
- Non-functional requirements not reflected in tasks (e.g., performance, security)
#### F. Inconsistency
- Terminology drift (same concept named differently across files)
- Data entities referenced in plan but absent in spec (or vice versa)
- Task ordering contradictions (e.g., integration tasks before foundational setup tasks without dependency note)
- Conflicting requirements (e.g., one requires Next.js while other specifies Vue)
### 5. Severity Assignment
Use this heuristic to prioritize findings:
- **CRITICAL**: Violates constitution MUST, missing core spec artifact, or requirement with zero coverage that blocks baseline functionality
- **HIGH**: Duplicate or conflicting requirement, ambiguous security/performance attribute, untestable acceptance criterion
- **MEDIUM**: Terminology drift, missing non-functional task coverage, underspecified edge case
- **LOW**: Style/wording improvements, minor redundancy not affecting execution order
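One way to encode the heuristic above as code; the exact flags a real implementation would track are assumptions:
```typescript
type Severity = 'CRITICAL' | 'HIGH' | 'MEDIUM' | 'LOW';

interface Finding {
  violatesConstitutionMust: boolean;
  zeroCoverageBlocksBaseline: boolean;
  duplicateOrConflicting: boolean;
  ambiguousSecurityOrPerformance: boolean;
  terminologyDrift: boolean;
  missingNonFunctionalCoverage: boolean;
}

// Checks run from most to least severe, so the first match wins.
function assignSeverity(f: Finding): Severity {
  if (f.violatesConstitutionMust || f.zeroCoverageBlocksBaseline) return 'CRITICAL';
  if (f.duplicateOrConflicting || f.ambiguousSecurityOrPerformance) return 'HIGH';
  if (f.terminologyDrift || f.missingNonFunctionalCoverage) return 'MEDIUM';
  return 'LOW';
}
```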
### 6. Produce Compact Analysis Report
Output a Markdown report (no file writes) with the following structure:
## Specification Analysis Report
| ID | Category | Severity | Location(s) | Summary | Recommendation |
|----|----------|----------|-------------|---------|----------------|
| A1 | Duplication | HIGH | spec.md:L120-134 | Two similar requirements ... | Merge phrasing; keep clearer version |
(Add one row per finding; generate stable IDs prefixed by category initial.)
**Coverage Summary Table:**
| Requirement Key | Has Task? | Task IDs | Notes |
|-----------------|-----------|----------|-------|
**Constitution Alignment Issues:** (if any)
**Unmapped Tasks:** (if any)
**Metrics:**
- Total Requirements
- Total Tasks
- Coverage % (requirements with >=1 task)
- Ambiguity Count
- Duplication Count
- Critical Issues Count
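The coverage metric above reduces to a simple ratio; a sketch, with the row shape assumed from the coverage summary table:
```typescript
interface CoverageRow {
  requirementKey: string;
  taskIds: string[];
}

// Percentage of requirements that have at least one mapped task.
function coveragePercent(rows: CoverageRow[]): number {
  if (rows.length === 0) return 100;
  const covered = rows.filter((row) => row.taskIds.length > 0).length;
  return Math.round((covered / rows.length) * 100);
}
```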
### 7. Provide Next Actions
At end of report, output a concise Next Actions block:
- If CRITICAL issues exist: Recommend resolving before `/implement`
- If only LOW/MEDIUM: User may proceed, but provide improvement suggestions
- Provide explicit command suggestions: e.g., "Run /specify with refinement", "Run /plan to adjust architecture", "Manually edit tasks.md to add coverage for 'performance-metrics'"
### 8. Offer Remediation
Ask the user: "Would you like me to suggest concrete remediation edits for the top N issues?" (Do NOT apply them automatically.)
## Operating Principles
### Context Efficiency
- **Minimal high-signal tokens**: Focus on actionable findings, not exhaustive documentation
- **Progressive disclosure**: Load artifacts incrementally; don't dump all content into analysis
- **Token-efficient output**: Limit findings table to 50 rows; summarize overflow
- **Deterministic results**: Rerunning without changes should produce consistent IDs and counts
### Analysis Guidelines
- **NEVER modify files** (this is read-only analysis)
- **NEVER hallucinate missing sections** (if absent, report them accurately)
- **Prioritize constitution violations** (these are always CRITICAL)
- **Use examples over exhaustive rules** (cite specific instances, not generic patterns)
- **Report zero issues gracefully** (emit success report with coverage statistics)
## Context
$ARGUMENTS


@@ -0,0 +1,287 @@
---
description: Generate a custom checklist for the current feature based on user requirements.
---
## Checklist Purpose: "Unit Tests for English"
**CRITICAL CONCEPT**: Checklists are **UNIT TESTS FOR REQUIREMENTS WRITING** - they validate the quality, clarity, and completeness of requirements in a given domain.
**NOT for verification/testing**:
- ❌ NOT "Verify the button clicks correctly"
- ❌ NOT "Test error handling works"
- ❌ NOT "Confirm the API returns 200"
- ❌ NOT checking if code/implementation matches the spec
**FOR requirements quality validation**:
- ✅ "Are visual hierarchy requirements defined for all card types?" (completeness)
- ✅ "Is 'prominent display' quantified with specific sizing/positioning?" (clarity)
- ✅ "Are hover state requirements consistent across all interactive elements?" (consistency)
- ✅ "Are accessibility requirements defined for keyboard navigation?" (coverage)
- ✅ "Does the spec define what happens when logo image fails to load?" (edge cases)
**Metaphor**: If your spec is code written in English, the checklist is its unit test suite. You're testing whether the requirements are well-written, complete, unambiguous, and ready for implementation - NOT whether the implementation works.
## User Input
```text
$ARGUMENTS
```
You **MUST** consider the user input before proceeding (if not empty).
## Execution Steps
1. **Setup**: Run `.specify/scripts/bash/check-prerequisites.sh --json` from repo root and parse JSON for FEATURE_DIR and AVAILABLE_DOCS list.
- All file paths must be absolute.
   - For single quotes in args like "I'm Groot", use escape syntax: e.g. 'I'\''m Groot' (or double-quote if possible: "I'm Groot").
2. **Clarify intent (dynamic)**: Derive up to THREE initial contextual clarifying questions (no pre-baked catalog). They MUST:
- Be generated from the user's phrasing + extracted signals from spec/plan/tasks
- Only ask about information that materially changes checklist content
- Be skipped individually if already unambiguous in `$ARGUMENTS`
- Prefer precision over breadth
Generation algorithm:
1. Extract signals: feature domain keywords (e.g., auth, latency, UX, API), risk indicators ("critical", "must", "compliance"), stakeholder hints ("QA", "review", "security team"), and explicit deliverables ("a11y", "rollback", "contracts").
2. Cluster signals into candidate focus areas (max 4) ranked by relevance.
3. Identify probable audience & timing (author, reviewer, QA, release) if not explicit.
4. Detect missing dimensions: scope breadth, depth/rigor, risk emphasis, exclusion boundaries, measurable acceptance criteria.
5. Formulate questions chosen from these archetypes:
- Scope refinement (e.g., "Should this include integration touchpoints with X and Y or stay limited to local module correctness?")
- Risk prioritization (e.g., "Which of these potential risk areas should receive mandatory gating checks?")
- Depth calibration (e.g., "Is this a lightweight pre-commit sanity list or a formal release gate?")
- Audience framing (e.g., "Will this be used by the author only or peers during PR review?")
- Boundary exclusion (e.g., "Should we explicitly exclude performance tuning items this round?")
- Scenario class gap (e.g., "No recovery flows detected—are rollback / partial failure paths in scope?")
Question formatting rules:
- If presenting options, generate a compact table with columns: Option | Candidate | Why It Matters
- Limit to A–E options maximum; omit table if a free-form answer is clearer
- Never ask the user to restate what they already said
- Avoid speculative categories (no hallucination). If uncertain, ask explicitly: "Confirm whether X belongs in scope."
Defaults when interaction impossible:
- Depth: Standard
- Audience: Reviewer (PR) if code-related; Author otherwise
- Focus: Top 2 relevance clusters
Output the questions (label Q1/Q2/Q3). After answers: if ≥2 scenario classes (Alternate / Exception / Recovery / Non-Functional domain) remain unclear, you MAY ask up to TWO more targeted follow-ups (Q4/Q5) with a one-line justification each (e.g., "Unresolved recovery path risk"). Do not exceed five total questions. Skip escalation if user explicitly declines more.
3. **Understand user request**: Combine `$ARGUMENTS` + clarifying answers:
- Derive checklist theme (e.g., security, review, deploy, ux)
- Consolidate explicit must-have items mentioned by user
- Map focus selections to category scaffolding
- Infer any missing context from spec/plan/tasks (do NOT hallucinate)
4. **Load feature context**: Read from FEATURE_DIR:
- spec.md: Feature requirements and scope
- plan.md (if exists): Technical details, dependencies
- tasks.md (if exists): Implementation tasks
**Context Loading Strategy**:
- Load only necessary portions relevant to active focus areas (avoid full-file dumping)
- Prefer summarizing long sections into concise scenario/requirement bullets
- Use progressive disclosure: add follow-on retrieval only if gaps detected
- If source docs are large, generate interim summary items instead of embedding raw text
5. **Generate checklist** - Create "Unit Tests for Requirements":
- Create `FEATURE_DIR/checklists/` directory if it doesn't exist
- Generate unique checklist filename:
- Use short, descriptive name based on domain (e.g., `ux.md`, `api.md`, `security.md`)
- Format: `[domain].md`
- If file exists, append to existing file
- Number items sequentially starting from CHK001
- Each `/speckit.checklist` run creates a NEW file (never overwrites existing checklists)
**CORE PRINCIPLE - Test the Requirements, Not the Implementation**:
Every checklist item MUST evaluate the REQUIREMENTS THEMSELVES for:
- **Completeness**: Are all necessary requirements present?
- **Clarity**: Are requirements unambiguous and specific?
- **Consistency**: Do requirements align with each other?
- **Measurability**: Can requirements be objectively verified?
- **Coverage**: Are all scenarios/edge cases addressed?
**Category Structure** - Group items by requirement quality dimensions:
- **Requirement Completeness** (Are all necessary requirements documented?)
- **Requirement Clarity** (Are requirements specific and unambiguous?)
- **Requirement Consistency** (Do requirements align without conflicts?)
- **Acceptance Criteria Quality** (Are success criteria measurable?)
- **Scenario Coverage** (Are all flows/cases addressed?)
- **Edge Case Coverage** (Are boundary conditions defined?)
- **Non-Functional Requirements** (Performance, Security, Accessibility, etc. - are they specified?)
- **Dependencies & Assumptions** (Are they documented and validated?)
- **Ambiguities & Conflicts** (What needs clarification?)
**HOW TO WRITE CHECKLIST ITEMS - "Unit Tests for English"**:
**WRONG** (Testing implementation):
- "Verify landing page displays 3 episode cards"
- "Test hover states work on desktop"
- "Confirm logo click navigates home"
**CORRECT** (Testing requirements quality):
- "Are the exact number and layout of featured episodes specified?" [Completeness]
- "Is 'prominent display' quantified with specific sizing/positioning?" [Clarity]
- "Are hover state requirements consistent across all interactive elements?" [Consistency]
- "Are keyboard navigation requirements defined for all interactive UI?" [Coverage]
- "Is the fallback behavior specified when logo image fails to load?" [Edge Cases]
- "Are loading states defined for asynchronous episode data?" [Completeness]
- "Does the spec define visual hierarchy for competing UI elements?" [Clarity]
**ITEM STRUCTURE**:
Each item should follow this pattern:
- Question format asking about requirement quality
- Focus on what's WRITTEN (or not written) in the spec/plan
- Include quality dimension in brackets [Completeness/Clarity/Consistency/etc.]
- Reference spec section `[Spec §X.Y]` when checking existing requirements
- Use `[Gap]` marker when checking for missing requirements
**EXAMPLES BY QUALITY DIMENSION**:
Completeness:
- "Are error handling requirements defined for all API failure modes? [Gap]"
- "Are accessibility requirements specified for all interactive elements? [Completeness]"
- "Are mobile breakpoint requirements defined for responsive layouts? [Gap]"
Clarity:
- "Is 'fast loading' quantified with specific timing thresholds? [Clarity, Spec §NFR-2]"
- "Are 'related episodes' selection criteria explicitly defined? [Clarity, Spec §FR-5]"
- "Is 'prominent' defined with measurable visual properties? [Ambiguity, Spec §FR-4]"
Consistency:
- "Do navigation requirements align across all pages? [Consistency, Spec §FR-10]"
- "Are card component requirements consistent between landing and detail pages? [Consistency]"
Coverage:
- "Are requirements defined for zero-state scenarios (no episodes)? [Coverage, Edge Case]"
- "Are concurrent user interaction scenarios addressed? [Coverage, Gap]"
- "Are requirements specified for partial data loading failures? [Coverage, Exception Flow]"
Measurability:
- "Are visual hierarchy requirements measurable/testable? [Acceptance Criteria, Spec §FR-1]"
- "Can 'balanced visual weight' be objectively verified? [Measurability, Spec §FR-2]"
**Scenario Classification & Coverage** (Requirements Quality Focus):
- Check if requirements exist for: Primary, Alternate, Exception/Error, Recovery, Non-Functional scenarios
- For each scenario class, ask: "Are [scenario type] requirements complete, clear, and consistent?"
- If scenario class missing: "Are [scenario type] requirements intentionally excluded or missing? [Gap]"
- Include resilience/rollback when state mutation occurs: "Are rollback requirements defined for migration failures? [Gap]"
**Traceability Requirements**:
- MINIMUM: ≥80% of items MUST include at least one traceability reference
- Each item should reference: spec section `[Spec §X.Y]`, or use markers: `[Gap]`, `[Ambiguity]`, `[Conflict]`, `[Assumption]`
- If no ID system exists: "Is a requirement & acceptance criteria ID scheme established? [Traceability]"
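A loose sketch of the ≥80% traceability floor (the pattern below matches the reference formats shown in this document and is an assumption, not a prescribed validator):
```typescript
// An item counts as traceable if it carries a spec reference or one of
// the [Gap]/[Ambiguity]/[Conflict]/[Assumption] markers. The word-level
// match is deliberately loose and may over-count items that merely
// mention these terms in prose.
const TRACE_PATTERN = /\bSpec §|\b(?:Gap|Ambiguity|Conflict|Assumption)\b/;

function meetsTraceabilityFloor(items: string[], floor = 0.8): boolean {
  if (items.length === 0) return true;
  const traced = items.filter((item) => TRACE_PATTERN.test(item)).length;
  return traced / items.length >= floor;
}
```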
**Surface & Resolve Issues** (Requirements Quality Problems):
Ask questions about the requirements themselves:
- Ambiguities: "Is the term 'fast' quantified with specific metrics? [Ambiguity, Spec §NFR-1]"
- Conflicts: "Do navigation requirements conflict between §FR-10 and §FR-10a? [Conflict]"
- Assumptions: "Is the assumption of 'always available podcast API' validated? [Assumption]"
- Dependencies: "Are external podcast API requirements documented? [Dependency, Gap]"
- Missing definitions: "Is 'visual hierarchy' defined with measurable criteria? [Gap]"
**Content Consolidation**:
- Soft cap: If raw candidate items > 40, prioritize by risk/impact
- Merge near-duplicates checking the same requirement aspect
- If >5 low-impact edge cases, create one item: "Are edge cases X, Y, Z addressed in requirements? [Coverage]"
**🚫 ABSOLUTELY PROHIBITED** - These make it an implementation test, not a requirements test:
- ❌ Any item starting with "Verify", "Test", "Confirm", "Check" + implementation behavior
- ❌ References to code execution, user actions, system behavior
- ❌ "Displays correctly", "works properly", "functions as expected"
- ❌ "Click", "navigate", "render", "load", "execute"
- ❌ Test cases, test plans, QA procedures
- ❌ Implementation details (frameworks, APIs, algorithms)
**✅ REQUIRED PATTERNS** - These test requirements quality:
- ✅ "Are [requirement type] defined/specified/documented for [scenario]?"
- ✅ "Is [vague term] quantified/clarified with specific criteria?"
- ✅ "Are requirements consistent between [section A] and [section B]?"
- ✅ "Can [requirement] be objectively measured/verified?"
- ✅ "Are [edge cases/scenarios] addressed in requirements?"
- ✅ "Does the spec define [missing aspect]?"
6. **Structure Reference**: Generate the checklist following the canonical template in `.specify/templates/checklist-template.md` for title, meta section, category headings, and ID formatting. If template is unavailable, use: H1 title, purpose/created meta lines, `##` category sections containing `- [ ] CHK### <requirement item>` lines with globally incrementing IDs starting at CHK001.
7. **Report**: Output full path to created checklist, item count, and remind user that each run creates a new file. Summarize:
- Focus areas selected
- Depth level
- Actor/timing
- Any explicit user-specified must-have items incorporated
**Important**: Each `/speckit.checklist` command invocation creates a checklist file using short, descriptive names unless file already exists. This allows:
- Multiple checklists of different types (e.g., `ux.md`, `test.md`, `security.md`)
- Simple, memorable filenames that indicate checklist purpose
- Easy identification and navigation in the `checklists/` folder
To avoid clutter, use descriptive types and clean up obsolete checklists when done.
## Example Checklist Types & Sample Items
**UX Requirements Quality:** `ux.md`
Sample items (testing the requirements, NOT the implementation):
- "Are visual hierarchy requirements defined with measurable criteria? [Clarity, Spec §FR-1]"
- "Is the number and positioning of UI elements explicitly specified? [Completeness, Spec §FR-1]"
- "Are interaction state requirements (hover, focus, active) consistently defined? [Consistency]"
- "Are accessibility requirements specified for all interactive elements? [Coverage, Gap]"
- "Is fallback behavior defined when images fail to load? [Edge Case, Gap]"
- "Can 'prominent display' be objectively measured? [Measurability, Spec §FR-4]"
**API Requirements Quality:** `api.md`
Sample items:
- "Are error response formats specified for all failure scenarios? [Completeness]"
- "Are rate limiting requirements quantified with specific thresholds? [Clarity]"
- "Are authentication requirements consistent across all endpoints? [Consistency]"
- "Are retry/timeout requirements defined for external dependencies? [Coverage, Gap]"
- "Is versioning strategy documented in requirements? [Gap]"
**Performance Requirements Quality:** `performance.md`
Sample items:
- "Are performance requirements quantified with specific metrics? [Clarity]"
- "Are performance targets defined for all critical user journeys? [Coverage]"
- "Are performance requirements under different load conditions specified? [Completeness]"
- "Can performance requirements be objectively measured? [Measurability]"
- "Are degradation requirements defined for high-load scenarios? [Edge Case, Gap]"
**Security Requirements Quality:** `security.md`
Sample items:
- "Are authentication requirements specified for all protected resources? [Coverage]"
- "Are data protection requirements defined for sensitive information? [Completeness]"
- "Is the threat model documented and requirements aligned to it? [Traceability]"
- "Are security requirements consistent with compliance obligations? [Consistency]"
- "Are security failure/breach response requirements defined? [Gap, Exception Flow]"
## Anti-Examples: What NOT To Do
**❌ WRONG - These test implementation, not requirements:**
```markdown
- [ ] CHK001 - Verify landing page displays 3 episode cards [Spec §FR-001]
- [ ] CHK002 - Test hover states work correctly on desktop [Spec §FR-003]
- [ ] CHK003 - Confirm logo click navigates to home page [Spec §FR-010]
- [ ] CHK004 - Check that related episodes section shows 3-5 items [Spec §FR-005]
```
**✅ CORRECT - These test requirements quality:**
```markdown
- [ ] CHK001 - Are the number and layout of featured episodes explicitly specified? [Completeness, Spec §FR-001]
- [ ] CHK002 - Are hover state requirements consistently defined for all interactive elements? [Consistency, Spec §FR-003]
- [ ] CHK003 - Are navigation requirements clear for all clickable brand elements? [Clarity, Spec §FR-010]
- [ ] CHK004 - Is the selection criteria for related episodes documented? [Gap, Spec §FR-005]
- [ ] CHK005 - Are loading state requirements defined for asynchronous episode data? [Gap]
- [ ] CHK006 - Can "visual hierarchy" requirements be objectively measured? [Measurability, Spec §FR-001]
```
**Key Differences:**
- Wrong: Tests if the system works correctly
- Correct: Tests if the requirements are written correctly
- Wrong: Verification of behavior
- Correct: Validation of requirement quality
- Wrong: "Does it do X?"
- Correct: "Is X clearly specified?"


@@ -0,0 +1,176 @@
---
description: Identify underspecified areas in the current feature spec by asking up to 5 highly targeted clarification questions and encoding answers back into the spec.
---
## User Input
```text
$ARGUMENTS
```
You **MUST** consider the user input before proceeding (if not empty).
## Outline
Goal: Detect and reduce ambiguity or missing decision points in the active feature specification and record the clarifications directly in the spec file.
Note: This clarification workflow is expected to run (and be completed) BEFORE invoking `/speckit.plan`. If the user explicitly states they are skipping clarification (e.g., exploratory spike), you may proceed, but must warn that downstream rework risk increases.
Execution steps:
1. Run `.specify/scripts/bash/check-prerequisites.sh --json --paths-only` from repo root **once** (combined `--json --paths-only` mode / `-Json -PathsOnly`). Parse minimal JSON payload fields:
- `FEATURE_DIR`
- `FEATURE_SPEC`
- (Optionally capture `IMPL_PLAN`, `TASKS` for future chained flows.)
- If JSON parsing fails, abort and instruct user to re-run `/speckit.specify` or verify feature branch environment.
   - For single quotes in args like "I'm Groot", use escape syntax: e.g. 'I'\''m Groot' (or double-quote if possible: "I'm Groot").
2. Load the current spec file. Perform a structured ambiguity & coverage scan using this taxonomy. For each category, mark status: Clear / Partial / Missing. Produce an internal coverage map used for prioritization (do not output raw map unless no questions will be asked).
Functional Scope & Behavior:
- Core user goals & success criteria
- Explicit out-of-scope declarations
- User roles / personas differentiation
Domain & Data Model:
- Entities, attributes, relationships
- Identity & uniqueness rules
- Lifecycle/state transitions
- Data volume / scale assumptions
Interaction & UX Flow:
- Critical user journeys / sequences
- Error/empty/loading states
- Accessibility or localization notes
Non-Functional Quality Attributes:
- Performance (latency, throughput targets)
- Scalability (horizontal/vertical, limits)
- Reliability & availability (uptime, recovery expectations)
- Observability (logging, metrics, tracing signals)
- Security & privacy (authN/Z, data protection, threat assumptions)
- Compliance / regulatory constraints (if any)
Integration & External Dependencies:
- External services/APIs and failure modes
- Data import/export formats
- Protocol/versioning assumptions
Edge Cases & Failure Handling:
- Negative scenarios
- Rate limiting / throttling
- Conflict resolution (e.g., concurrent edits)
Constraints & Tradeoffs:
- Technical constraints (language, storage, hosting)
- Explicit tradeoffs or rejected alternatives
Terminology & Consistency:
- Canonical glossary terms
- Avoided synonyms / deprecated terms
Completion Signals:
- Acceptance criteria testability
- Measurable Definition of Done style indicators
Misc / Placeholders:
- TODO markers / unresolved decisions
- Ambiguous adjectives ("robust", "intuitive") lacking quantification
For each category with Partial or Missing status, add a candidate question opportunity unless:
- Clarification would not materially change implementation or validation strategy
- Information is better deferred to planning phase (note internally)
3. Generate (internally) a prioritized queue of candidate clarification questions (maximum 5). Do NOT output them all at once. Apply these constraints:
- Maximum of 5 total questions across the whole session.
- Each question must be answerable with EITHER:
* A short multiple-choice selection (2-5 distinct, mutually exclusive options), OR
* A one-word / short-phrase answer (explicitly constrain: "Answer in <=5 words").
- Only include questions whose answers materially impact architecture, data modeling, task decomposition, test design, UX behavior, operational readiness, or compliance validation.
- Ensure category coverage balance: attempt to cover the highest impact unresolved categories first; avoid asking two low-impact questions when a single high-impact area (e.g., security posture) is unresolved.
- Exclude questions already answered, trivial stylistic preferences, or plan-level execution details (unless blocking correctness).
- Favor clarifications that reduce downstream rework risk or prevent misaligned acceptance tests.
- If more than 5 categories remain unresolved, select the top 5 by (Impact * Uncertainty) heuristic (e.g., an unresolved security posture scoring 5 * 4 = 20 outranks a minor UX wording gap scoring 2 * 2 = 4).
4. Sequential questioning loop (interactive; a formatted example follows this step):
- Present EXACTLY ONE question at a time.
- For multiple-choice questions:
* **Analyze all options** and determine the **most suitable option** based on:
- Best practices for the project type
- Common patterns in similar implementations
- Risk reduction (security, performance, maintainability)
- Alignment with any explicit project goals or constraints visible in the spec
* Present your **recommended option prominently** at the top with clear reasoning (1-2 sentences explaining why this is the best choice).
* Format as: `**Recommended:** Option [X] - <reasoning>`
* Then render all options as a Markdown table:
| Option | Description |
|--------|-------------|
| A | <Option A description> |
| B | <Option B description> |
| C | <Option C description> | (add D/E as needed up to 5)
| Short | Provide a different short answer (<=5 words) | (Include only if free-form alternative is appropriate)
* After the table, add: `You can reply with the option letter (e.g., "A"), accept the recommendation by saying "yes" or "recommended", or provide your own short answer.`
- For short-answer style (no meaningful discrete options):
* Provide your **suggested answer** based on best practices and context.
* Format as: `**Suggested:** <your proposed answer> - <brief reasoning>`
* Then output: `Format: Short answer (<=5 words). You can accept the suggestion by saying "yes" or "suggested", or provide your own answer.`
- After the user answers:
* If the user replies with "yes", "recommended", or "suggested", use your previously stated recommendation/suggestion as the answer.
* Otherwise, validate the answer maps to one option or fits the <=5 word constraint.
* If ambiguous, ask for a quick disambiguation (count still belongs to same question; do not advance).
* Once satisfactory, record it in working memory (do not yet write to disk) and move to the next queued question.
- Stop asking further questions when:
* All critical ambiguities resolved early (remaining queued items become unnecessary), OR
* User signals completion ("done", "good", "no more"), OR
* You reach 5 asked questions.
- Never reveal future queued questions in advance.
- If no valid questions exist at start, immediately report no critical ambiguities.
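For illustration, one instantiation of the multiple-choice format above (the topic and options are invented for this example):
```markdown
**Recommended:** Option A - Session-based auth is the common default for web apps and carries the least integration risk here.

| Option | Description |
|--------|-------------|
| A | Session-based authentication |
| B | OAuth2 via an external identity provider |
| Short | Provide a different short answer (<=5 words) |

You can reply with the option letter (e.g., "A"), accept the recommendation by saying "yes" or "recommended", or provide your own short answer.
```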
5. Integration after EACH accepted answer (incremental update approach):
- Maintain in-memory representation of the spec (loaded once at start) plus the raw file contents.
- For the first integrated answer in this session:
* Ensure a `## Clarifications` section exists (create it just after the highest-level contextual/overview section per the spec template if missing).
* Under it, create (if not present) a `### Session YYYY-MM-DD` subheading for today.
- Append a bullet line immediately after acceptance: `- Q: <question> → A: <final answer>`.
- Then immediately apply the clarification to the most appropriate section(s):
* Functional ambiguity → Update or add a bullet in Functional Requirements.
* User interaction / actor distinction → Update User Stories or Actors subsection (if present) with clarified role, constraint, or scenario.
* Data shape / entities → Update Data Model (add fields, types, relationships) preserving ordering; note added constraints succinctly.
* Non-functional constraint → Add/modify measurable criteria in Non-Functional / Quality Attributes section (convert vague adjective to metric or explicit target).
* Edge case / negative flow → Add a new bullet under Edge Cases / Error Handling (or create such subsection if template provides placeholder for it).
* Terminology conflict → Normalize term across spec; retain original only if necessary by adding `(formerly referred to as "X")` once.
- If the clarification invalidates an earlier ambiguous statement, replace that statement instead of duplicating; leave no obsolete contradictory text.
- Save the spec file AFTER each integration to minimize risk of context loss (atomic overwrite).
- Preserve formatting: do not reorder unrelated sections; keep heading hierarchy intact.
- Keep each inserted clarification minimal and testable (avoid narrative drift).
6. Validation (performed after EACH write plus final pass; a spot-check sketch follows this list):
- Clarifications session contains exactly one bullet per accepted answer (no duplicates).
- Total asked (accepted) questions ≤ 5.
- Updated sections contain no lingering vague placeholders the new answer was meant to resolve.
- No contradictory earlier statement remains (scan for now-invalid alternative choices removed).
- Markdown structure valid; only allowed new headings: `## Clarifications`, `### Session YYYY-MM-DD`.
- Terminology consistency: same canonical term used across all updated sections.
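A quick spot-check sketch for the first two validation items (the path comes from step 1; treat the count as advisory):
```sh
# One `- Q:` bullet per accepted answer, and never more than 5 in total.
grep -c '^- Q: ' "$FEATURE_SPEC"
```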
7. Write the updated spec back to `FEATURE_SPEC`.
8. Report completion (after questioning loop ends or early termination):
- Number of questions asked & answered.
- Path to updated spec.
- Sections touched (list names).
- Coverage summary table listing each taxonomy category with Status: Resolved (was Partial/Missing and addressed), Deferred (exceeds question quota or better suited for planning), Clear (already sufficient), Outstanding (still Partial/Missing but low impact).
- If any Outstanding or Deferred remain, recommend whether to proceed to `/speckit.plan` or run `/speckit.clarify` again later post-plan.
- Suggested next command.
Behavior rules:
- If no meaningful ambiguities found (or all potential questions would be low-impact), respond: "No critical ambiguities detected worth formal clarification." and suggest proceeding.
- If spec file missing, instruct user to run `/speckit.specify` first (do not create a new spec here).
- Never exceed 5 total asked questions (clarification retries for a single question do not count as new questions).
- Avoid speculative tech stack questions unless the absence blocks functional clarity.
- Respect user early termination signals ("stop", "done", "proceed").
- If no questions asked due to full coverage, output a compact coverage summary (all categories Clear) then suggest advancing.
- If quota reached with unresolved high-impact categories remaining, explicitly flag them under Deferred with rationale.
Context for prioritization: $ARGUMENTS


@@ -0,0 +1,77 @@
---
description: Create or update the project constitution from interactive or provided principle inputs, ensuring all dependent templates stay in sync.
---
## User Input
```text
$ARGUMENTS
```
You **MUST** consider the user input before proceeding (if not empty).
## Outline
You are updating the project constitution at `.specify/memory/constitution.md`. This file is a TEMPLATE containing placeholder tokens in square brackets (e.g. `[PROJECT_NAME]`, `[PRINCIPLE_1_NAME]`). Your job is to (a) collect/derive concrete values, (b) fill the template precisely, and (c) propagate any amendments across dependent artifacts.
Follow this execution flow:
1. Load the existing constitution template at `.specify/memory/constitution.md`.
- Identify every placeholder token of the form `[ALL_CAPS_IDENTIFIER]`.
**IMPORTANT**: The user might require fewer or more principles than the ones used in the template. If a number is specified, respect it and follow the general template structure. You will update the doc accordingly.
2. Collect/derive values for placeholders:
- If user input (conversation) supplies a value, use it.
- Otherwise infer from existing repo context (README, docs, prior constitution versions if embedded).
- For governance dates: `RATIFICATION_DATE` is the original adoption date (if unknown ask or mark TODO), `LAST_AMENDED_DATE` is today if changes are made, otherwise keep previous.
- `CONSTITUTION_VERSION` must increment according to semantic versioning rules:
* MAJOR: Backward incompatible governance/principle removals or redefinitions.
* MINOR: New principle/section added or materially expanded guidance.
* PATCH: Clarifications, wording, typo fixes, non-semantic refinements.
- If version bump type ambiguous, propose reasoning before finalizing.
3. Draft the updated constitution content:
- Replace every placeholder with concrete text (no bracketed tokens left except intentionally retained template slots that the project has chosen not to define yet—explicitly justify any left).
- Preserve heading hierarchy; comments can be removed once replaced unless they still add clarifying guidance.
- Ensure each Principle section has: a succinct name line, a paragraph (or bullet list) capturing non-negotiable rules, and explicit rationale if not obvious.
- Ensure Governance section lists amendment procedure, versioning policy, and compliance review expectations.
4. Consistency propagation checklist (convert prior checklist into active validations):
- Read `.specify/templates/plan-template.md` and ensure any "Constitution Check" or rules align with updated principles.
- Read `.specify/templates/spec-template.md` for scope/requirements alignment—update if constitution adds/removes mandatory sections or constraints.
- Read `.specify/templates/tasks-template.md` and ensure task categorization reflects new or removed principle-driven task types (e.g., observability, versioning, testing discipline).
- Read each command file in `.specify/templates/commands/*.md` (including this one) to verify no outdated references (agent-specific names like CLAUDE only) remain when generic guidance is required.
- Read any runtime guidance docs (e.g., `README.md`, `docs/quickstart.md`, or agent-specific guidance files if present). Update references to principles changed.
5. Produce a Sync Impact Report (prepend as an HTML comment at top of the constitution file after update):
- Version change: old → new
- List of modified principles (old title → new title if renamed)
- Added sections
- Removed sections
- Templates requiring updates (✅ updated / ⚠ pending) with file paths
- Follow-up TODOs if any placeholders intentionally deferred.
6. Validation before final output (a token spot-check sketch follows this list):
- No remaining unexplained bracket tokens.
- Version line matches report.
- Dates ISO format YYYY-MM-DD.
- Principles are declarative, testable, and free of vague language ("should" → replace with MUST/SHOULD rationale where appropriate).
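One possible shell spot-check for the first validation item (a sketch; the path is the one used throughout this command):
```sh
# List any unexplained bracket tokens still present (prints the fallback message if clean).
grep -nE '\[[A-Z_]+\]' .specify/memory/constitution.md || echo "No bracket tokens remain."
```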
7. Write the completed constitution back to `.specify/memory/constitution.md` (overwrite).
8. Output a final summary to the user with:
- New version and bump rationale.
- Any files flagged for manual follow-up.
- Suggested commit message (e.g., `docs: amend constitution to vX.Y.Z (principle additions + governance update)`).
Formatting & Style Requirements:
- Use Markdown headings exactly as in the template (do not demote/promote levels).
- Wrap long rationale lines to keep readability (<100 chars ideally) but do not hard enforce with awkward breaks.
- Keep a single blank line between sections.
- Avoid trailing whitespace.
If the user supplies partial updates (e.g., only one principle revision), still perform validation and version decision steps.
If critical info missing (e.g., ratification date truly unknown), insert `TODO(<FIELD_NAME>): explanation` and include in the Sync Impact Report under deferred items.
Do not create a new template; always operate on the existing `.specify/memory/constitution.md` file.


@@ -0,0 +1,128 @@
---
description: Execute the implementation plan by processing and executing all tasks defined in tasks.md
---
## User Input
```text
$ARGUMENTS
```
You **MUST** consider the user input before proceeding (if not empty).
## Outline
1. Run `.specify/scripts/bash/check-prerequisites.sh --json --require-tasks --include-tasks` from repo root and parse FEATURE_DIR and AVAILABLE_DOCS list. All paths must be absolute. For single quotes in args like "I'm Groot", use escape syntax: e.g. 'I'\''m Groot' (or double-quote if possible: "I'm Groot").
2. **Check checklists status** (if FEATURE_DIR/checklists/ exists):
- Scan all checklist files in the checklists/ directory
- For each checklist, count (a counting sketch appears at the end of this step):
* Total items: All lines matching `- [ ]` or `- [X]` or `- [x]`
* Completed items: Lines matching `- [X]` or `- [x]`
* Incomplete items: Lines matching `- [ ]`
- Create a status table:
```
| Checklist | Total | Completed | Incomplete | Status |
|-----------|-------|-----------|------------|--------|
| ux.md | 12 | 12 | 0 | ✓ PASS |
| test.md | 8 | 5 | 3 | ✗ FAIL |
| security.md | 6 | 6 | 0 | ✓ PASS |
```
- Calculate overall status:
* **PASS**: All checklists have 0 incomplete items
* **FAIL**: One or more checklists have incomplete items
- **If any checklist is incomplete**:
* Display the table with incomplete item counts
* **STOP** and ask: "Some checklists are incomplete. Do you want to proceed with implementation anyway? (yes/no)"
* Wait for user response before continuing
* If user says "no" or "wait" or "stop", halt execution
* If user says "yes" or "proceed" or "continue", proceed to step 3
- **If all checklists are complete**:
* Display the table showing all checklists passed
* Automatically proceed to step 3
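A minimal sketch of the per-checklist counting described at the top of this step (the filename is illustrative):
```sh
f="$FEATURE_DIR/checklists/ux.md"   # illustrative path
total=$(grep -cE '^[[:space:]]*- \[( |x|X)\]' "$f")
done_count=$(grep -cE '^[[:space:]]*- \[(x|X)\]' "$f")
echo "$f: $done_count/$total complete, $((total - done_count)) incomplete"
```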
3. Load and analyze the implementation context:
- **REQUIRED**: Read tasks.md for the complete task list and execution plan
- **REQUIRED**: Read plan.md for tech stack, architecture, and file structure
- **IF EXISTS**: Read data-model.md for entities and relationships
- **IF EXISTS**: Read contracts/ for API specifications and test requirements
- **IF EXISTS**: Read research.md for technical decisions and constraints
- **IF EXISTS**: Read quickstart.md for integration scenarios
4. **Project Setup Verification**:
- **REQUIRED**: Create/verify ignore files based on actual project setup:
**Detection & Creation Logic** (a combined sketch appears after the pattern lists below):
- Check if the following command succeeds to determine if the repository is a git repo (create/verify .gitignore if so):
```sh
git rev-parse --git-dir 2>/dev/null
```
- Check if Dockerfile* exists or Docker in plan.md → create/verify .dockerignore
- Check if .eslintrc* or eslint.config.* exists → create/verify .eslintignore
- Check if .prettierrc* exists → create/verify .prettierignore
- Check if .npmrc or package.json exists → create/verify .npmignore (if publishing)
- Check if terraform files (*.tf) exist → create/verify .terraformignore
- Check if .helmignore needed (helm charts present) → create/verify .helmignore
**If ignore file already exists**: Verify it contains essential patterns, append missing critical patterns only
**If ignore file missing**: Create with full pattern set for detected technology
**Common Patterns by Technology** (from plan.md tech stack):
- **Node.js/JavaScript**: `node_modules/`, `dist/`, `build/`, `*.log`, `.env*`
- **Python**: `__pycache__/`, `*.pyc`, `.venv/`, `venv/`, `dist/`, `*.egg-info/`
- **Java**: `target/`, `*.class`, `*.jar`, `.gradle/`, `build/`
- **C#/.NET**: `bin/`, `obj/`, `*.user`, `*.suo`, `packages/`
- **Go**: `*.exe`, `*.test`, `vendor/`, `*.out`
- **Ruby**: `.bundle/`, `log/`, `tmp/`, `*.gem`, `vendor/bundle/`
- **PHP**: `vendor/`, `*.log`, `*.cache`, `*.env`
- **Rust**: `target/`, `debug/`, `release/`, `*.rs.bk`, `*.rlib`, `*.prof*`, `.idea/`, `*.log`, `.env*`
- **Kotlin**: `build/`, `out/`, `.gradle/`, `.idea/`, `*.class`, `*.jar`, `*.iml`, `*.log`, `.env*`
- **C++**: `build/`, `bin/`, `obj/`, `out/`, `*.o`, `*.so`, `*.a`, `*.exe`, `*.dll`, `.idea/`, `*.log`, `.env*`
- **C**: `build/`, `bin/`, `obj/`, `out/`, `*.o`, `*.a`, `*.so`, `*.exe`, `Makefile`, `config.log`, `.idea/`, `*.log`, `.env*`
- **Universal**: `.DS_Store`, `Thumbs.db`, `*.tmp`, `*.swp`, `.vscode/`, `.idea/`
**Tool-Specific Patterns**:
- **Docker**: `node_modules/`, `.git/`, `Dockerfile*`, `.dockerignore`, `*.log*`, `.env*`, `coverage/`
- **ESLint**: `node_modules/`, `dist/`, `build/`, `coverage/`, `*.min.js`
- **Prettier**: `node_modules/`, `dist/`, `build/`, `coverage/`, `package-lock.json`, `yarn.lock`, `pnpm-lock.yaml`
- **Terraform**: `.terraform/`, `*.tfstate*`, `*.tfvars`, `.terraform.lock.hcl`
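A combined sketch of the detection-and-creation logic for two representative cases (pattern sets abbreviated; the full lists are in the tables above):
```sh
# Git repo without a .gitignore -> seed it with core Node.js patterns.
if git rev-parse --git-dir >/dev/null 2>&1 && [ ! -f .gitignore ]; then
  printf '%s\n' 'node_modules/' 'dist/' 'build/' '*.log' '.env*' > .gitignore
fi
# Dockerfile present without a .dockerignore -> seed it with core Docker patterns.
if ls Dockerfile* >/dev/null 2>&1 && [ ! -f .dockerignore ]; then
  printf '%s\n' 'node_modules/' '.git/' '*.log*' '.env*' 'coverage/' > .dockerignore
fi
```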
5. Parse tasks.md structure and extract:
- **Task phases**: Setup, Tests, Core, Integration, Polish
- **Task dependencies**: Sequential vs parallel execution rules
- **Task details**: ID, description, file paths, parallel markers [P]
- **Execution flow**: Order and dependency requirements
6. Execute implementation following the task plan:
- **Phase-by-phase execution**: Complete each phase before moving to the next
- **Respect dependencies**: Run sequential tasks in order, parallel tasks [P] can run together
- **Follow TDD approach**: Execute test tasks before their corresponding implementation tasks
- **File-based coordination**: Tasks affecting the same files must run sequentially
- **Validation checkpoints**: Verify each phase completion before proceeding
7. Implementation execution rules:
- **Setup first**: Initialize project structure, dependencies, configuration
- **Tests before code**: If tests are requested, write tests for contracts, entities, and integration scenarios before the corresponding implementation
- **Core development**: Implement models, services, CLI commands, endpoints
- **Integration work**: Database connections, middleware, logging, external services
- **Polish and validation**: Unit tests, performance optimization, documentation
8. Progress tracking and error handling:
- Report progress after each completed task
- Halt execution if any non-parallel task fails
- For parallel tasks [P], continue with successful tasks, report failed ones
- Provide clear error messages with context for debugging
- Suggest next steps if implementation cannot proceed
- **IMPORTANT** For completed tasks, make sure to mark the task off as [X] in the tasks file.
9. Completion validation:
- Verify all required tasks are completed
- Check that implemented features match the original specification
- Validate that tests pass and coverage meets requirements
- Confirm the implementation follows the technical plan
- Report final status with summary of completed work
Note: This command assumes a complete task breakdown exists in tasks.md. If tasks are incomplete or missing, suggest running `/speckit.tasks` first to regenerate the task list.


@@ -0,0 +1,80 @@
---
description: Execute the implementation planning workflow using the plan template to generate design artifacts.
---
## User Input
```text
$ARGUMENTS
```
You **MUST** consider the user input before proceeding (if not empty).
## Outline
1. **Setup**: Run `.specify/scripts/bash/setup-plan.sh --json` from repo root and parse JSON for FEATURE_SPEC, IMPL_PLAN, SPECS_DIR, BRANCH. For single quotes in args like "I'm Groot", use escape syntax: e.g. 'I'\''m Groot' (or double-quote if possible: "I'm Groot").
2. **Load context**: Read FEATURE_SPEC and `.specify/memory/constitution.md`. Load IMPL_PLAN template (already copied).
3. **Execute plan workflow**: Follow the structure in IMPL_PLAN template to:
- Fill Technical Context (mark unknowns as "NEEDS CLARIFICATION")
- Fill Constitution Check section from constitution
- Evaluate gates (ERROR if violations unjustified)
- Phase 0: Generate research.md (resolve all NEEDS CLARIFICATION)
- Phase 1: Generate data-model.md, contracts/, quickstart.md
- Phase 1: Update agent context by running the agent script
- Re-evaluate Constitution Check post-design
4. **Stop and report**: Command ends after Phase 2 planning. Report branch, IMPL_PLAN path, and generated artifacts.
## Phases
### Phase 0: Outline & Research
1. **Extract unknowns from Technical Context** above:
- For each NEEDS CLARIFICATION → research task
- For each dependency → best practices task
- For each integration → patterns task
2. **Generate and dispatch research agents**:
```
For each unknown in Technical Context:
Task: "Research {unknown} for {feature context}"
For each technology choice:
Task: "Find best practices for {tech} in {domain}"
```
3. **Consolidate findings** in `research.md` using format:
- Decision: [what was chosen]
- Rationale: [why chosen]
- Alternatives considered: [what else evaluated]
**Output**: research.md with all NEEDS CLARIFICATION resolved
### Phase 1: Design & Contracts
**Prerequisites:** `research.md` complete
1. **Extract entities from feature spec** → `data-model.md`:
- Entity name, fields, relationships
- Validation rules from requirements
- State transitions if applicable
2. **Generate API contracts** from functional requirements:
- For each user action → endpoint
- Use standard REST/GraphQL patterns
- Output OpenAPI/GraphQL schema to `/contracts/`
3. **Agent context update**:
- Run `.specify/scripts/bash/update-agent-context.sh cursor-agent`
- These scripts detect which AI agent is in use
- Update the appropriate agent-specific context file
- Add only new technology from current plan
- Preserve manual additions between markers
**Output**: data-model.md, /contracts/*, quickstart.md, agent-specific file
## Key rules
- Use absolute paths
- ERROR on gate failures or unresolved clarifications


@@ -0,0 +1,229 @@
---
description: Create or update the feature specification from a natural language feature description.
---
## User Input
```text
$ARGUMENTS
```
You **MUST** consider the user input before proceeding (if not empty).
## Outline
The text the user typed after `/speckit.specify` in the triggering message **is** the feature description. Assume you always have it available in this conversation even if `$ARGUMENTS` appears literally below. Do not ask the user to repeat it unless they provided an empty command.
Given that feature description, do this:
1. **Generate a concise short name** (2-4 words) for the branch:
- Analyze the feature description and extract the most meaningful keywords
- Create a 2-4 word short name that captures the essence of the feature
- Use action-noun format when possible (e.g., "add-user-auth", "fix-payment-bug")
- Preserve technical terms and acronyms (OAuth2, API, JWT, etc.)
- Keep it concise but descriptive enough to understand the feature at a glance
- Examples:
- "I want to add user authentication" → "user-auth"
- "Implement OAuth2 integration for the API" → "oauth2-api-integration"
- "Create a dashboard for analytics" → "analytics-dashboard"
- "Fix payment processing timeout bug" → "fix-payment-timeout"
2. Run the script `.specify/scripts/bash/create-new-feature.sh --json "$ARGUMENTS"` from repo root **with the short-name argument** and parse its JSON output for BRANCH_NAME and SPEC_FILE. All file paths must be absolute. (An invocation sketch follows the notes below.)
**IMPORTANT**:
- Append the 2-4 word short name you created in step 1 to the `.specify/scripts/bash/create-new-feature.sh --json "$ARGUMENTS"` command
- Bash: `--short-name "your-generated-short-name"`
- PowerShell: `-ShortName "your-generated-short-name"`
- For single quotes in args like "I'm Groot", use escape syntax: e.g. 'I'\''m Groot' (or double-quote if possible: "I'm Groot")
- You must only ever run this script once
- The JSON is provided in the terminal as output - always refer to it to get the actual content you're looking for
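A minimal invocation sketch for this step (assuming `jq`; the short name and description are invented for the example):
```sh
out=$(.specify/scripts/bash/create-new-feature.sh --json --short-name "user-auth" "Add user authentication")
BRANCH_NAME=$(echo "$out" | jq -r '.BRANCH_NAME')
SPEC_FILE=$(echo "$out" | jq -r '.SPEC_FILE')
```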
3. Load `.specify/templates/spec-template.md` to understand required sections.
4. Follow this execution flow:
1. Parse user description from Input
If empty: ERROR "No feature description provided"
2. Extract key concepts from description
Identify: actors, actions, data, constraints
3. For unclear aspects:
- Make informed guesses based on context and industry standards
- Only mark with [NEEDS CLARIFICATION: specific question] if:
- The choice significantly impacts feature scope or user experience
- Multiple reasonable interpretations exist with different implications
- No reasonable default exists
- **LIMIT: Maximum 3 [NEEDS CLARIFICATION] markers total**
- Prioritize clarifications by impact: scope > security/privacy > user experience > technical details
4. Fill User Scenarios & Testing section
If no clear user flow: ERROR "Cannot determine user scenarios"
5. Generate Functional Requirements
Each requirement must be testable
Use reasonable defaults for unspecified details (document assumptions in Assumptions section)
6. Define Success Criteria
Create measurable, technology-agnostic outcomes
Include both quantitative metrics (time, performance, volume) and qualitative measures (user satisfaction, task completion)
Each criterion must be verifiable without implementation details
7. Identify Key Entities (if data involved)
8. Return: SUCCESS (spec ready for planning)
5. Write the specification to SPEC_FILE using the template structure, replacing placeholders with concrete details derived from the feature description (arguments) while preserving section order and headings.
6. **Specification Quality Validation**: After writing the initial spec, validate it against quality criteria:
a. **Create Spec Quality Checklist**: Generate a checklist file at `FEATURE_DIR/checklists/requirements.md` using the checklist template structure with these validation items:
```markdown
# Specification Quality Checklist: [FEATURE NAME]
**Purpose**: Validate specification completeness and quality before proceeding to planning
**Created**: [DATE]
**Feature**: [Link to spec.md]
## Content Quality
- [ ] No implementation details (languages, frameworks, APIs)
- [ ] Focused on user value and business needs
- [ ] Written for non-technical stakeholders
- [ ] All mandatory sections completed
## Requirement Completeness
- [ ] No [NEEDS CLARIFICATION] markers remain
- [ ] Requirements are testable and unambiguous
- [ ] Success criteria are measurable
- [ ] Success criteria are technology-agnostic (no implementation details)
- [ ] All acceptance scenarios are defined
- [ ] Edge cases are identified
- [ ] Scope is clearly bounded
- [ ] Dependencies and assumptions identified
## Feature Readiness
- [ ] All functional requirements have clear acceptance criteria
- [ ] User scenarios cover primary flows
- [ ] Feature meets measurable outcomes defined in Success Criteria
- [ ] No implementation details leak into specification
## Notes
- Items marked incomplete require spec updates before `/speckit.clarify` or `/speckit.plan`
```
b. **Run Validation Check**: Review the spec against each checklist item:
- For each item, determine if it passes or fails
- Document specific issues found (quote relevant spec sections)
c. **Handle Validation Results**:
- **If all items pass**: Mark checklist complete and proceed to step 7
- **If items fail (excluding [NEEDS CLARIFICATION])**:
1. List the failing items and specific issues
2. Update the spec to address each issue
3. Re-run validation until all items pass (max 3 iterations)
4. If still failing after 3 iterations, document remaining issues in checklist notes and warn user
- **If [NEEDS CLARIFICATION] markers remain**:
1. Extract all [NEEDS CLARIFICATION: ...] markers from the spec
2. **LIMIT CHECK**: If more than 3 markers exist, keep only the 3 most critical (by scope/security/UX impact) and make informed guesses for the rest
3. For each clarification needed (max 3), present options to user in this format:
```markdown
## Question [N]: [Topic]
**Context**: [Quote relevant spec section]
**What we need to know**: [Specific question from NEEDS CLARIFICATION marker]
**Suggested Answers**:
| Option | Answer | Implications |
|--------|--------|--------------|
| A | [First suggested answer] | [What this means for the feature] |
| B | [Second suggested answer] | [What this means for the feature] |
| C | [Third suggested answer] | [What this means for the feature] |
| Custom | Provide your own answer | [Explain how to provide custom input] |
**Your choice**: _[Wait for user response]_
```
4. **CRITICAL - Table Formatting**: Ensure markdown tables are properly formatted:
- Use consistent spacing with pipes aligned
- Each cell should have spaces around content: `| Content |` not `|Content|`
- Header separator must have at least 3 dashes: `|--------|`
- Test that the table renders correctly in markdown preview
5. Number questions sequentially (Q1, Q2, Q3 - max 3 total)
6. Present all questions together before waiting for responses
7. Wait for user to respond with their choices for all questions (e.g., "Q1: A, Q2: Custom - [details], Q3: B")
8. Update the spec by replacing each [NEEDS CLARIFICATION] marker with the user's selected or provided answer
9. Re-run validation after all clarifications are resolved
d. **Update Checklist**: After each validation iteration, update the checklist file with current pass/fail status
7. Report completion with branch name, spec file path, checklist results, and readiness for the next phase (`/speckit.clarify` or `/speckit.plan`).
**NOTE:** The script creates and checks out the new branch and initializes the spec file before writing.
## General Guidelines
- Focus on **WHAT** users need and **WHY**.
- Avoid HOW to implement (no tech stack, APIs, code structure).
- Written for business stakeholders, not developers.
- DO NOT create any checklists that are embedded in the spec. That will be a separate command.
### Section Requirements
- **Mandatory sections**: Must be completed for every feature
- **Optional sections**: Include only when relevant to the feature
- When a section doesn't apply, remove it entirely (don't leave as "N/A")
### For AI Generation
When creating this spec from a user prompt:
1. **Make informed guesses**: Use context, industry standards, and common patterns to fill gaps
2. **Document assumptions**: Record reasonable defaults in the Assumptions section
3. **Limit clarifications**: Maximum 3 [NEEDS CLARIFICATION] markers - use only for critical decisions that:
- Significantly impact feature scope or user experience
- Have multiple reasonable interpretations with different implications
- Lack any reasonable default
4. **Prioritize clarifications**: scope > security/privacy > user experience > technical details
5. **Think like a tester**: Every vague requirement should fail the "testable and unambiguous" checklist item
6. **Common areas needing clarification** (only if no reasonable default exists):
- Feature scope and boundaries (include/exclude specific use cases)
- User types and permissions (if multiple conflicting interpretations possible)
- Security/compliance requirements (when legally/financially significant)
**Examples of reasonable defaults** (don't ask about these):
- Data retention: Industry-standard practices for the domain
- Performance targets: Standard web/mobile app expectations unless specified
- Error handling: User-friendly messages with appropriate fallbacks
- Authentication method: Standard session-based or OAuth2 for web apps
- Integration patterns: RESTful APIs unless specified otherwise
### Success Criteria Guidelines
Success criteria must be:
1. **Measurable**: Include specific metrics (time, percentage, count, rate)
2. **Technology-agnostic**: No mention of frameworks, languages, databases, or tools
3. **User-focused**: Describe outcomes from user/business perspective, not system internals
4. **Verifiable**: Can be tested/validated without knowing implementation details
**Good examples**:
- "Users can complete checkout in under 3 minutes"
- "System supports 10,000 concurrent users"
- "95% of searches return results in under 1 second"
- "Task completion rate improves by 40%"
**Bad examples** (implementation-focused):
- "API response time is under 200ms" (too technical, use "Users see results instantly")
- "Database can handle 1000 TPS" (implementation detail, use user-facing metric)
- "React components render efficiently" (framework-specific)
- "Redis cache hit rate above 80%" (technology-specific)


@@ -0,0 +1,128 @@
---
description: Generate an actionable, dependency-ordered tasks.md for the feature based on available design artifacts.
---
## User Input
```text
$ARGUMENTS
```
You **MUST** consider the user input before proceeding (if not empty).
## Outline
1. **Setup**: Run `.specify/scripts/bash/check-prerequisites.sh --json` from repo root and parse FEATURE_DIR and AVAILABLE_DOCS list. All paths must be absolute. For single quotes in args like "I'm Groot", use escape syntax: e.g. 'I'\''m Groot' (or double-quote if possible: "I'm Groot").
2. **Load design documents**: Read from FEATURE_DIR:
- **Required**: plan.md (tech stack, libraries, structure), spec.md (user stories with priorities)
- **Optional**: data-model.md (entities), contracts/ (API endpoints), research.md (decisions), quickstart.md (test scenarios)
- Note: Not all projects have all documents. Generate tasks based on what's available.
3. **Execute task generation workflow**:
- Load plan.md and extract tech stack, libraries, project structure
- Load spec.md and extract user stories with their priorities (P1, P2, P3, etc.)
- If data-model.md exists: Extract entities and map to user stories
- If contracts/ exists: Map endpoints to user stories
- If research.md exists: Extract decisions for setup tasks
- Generate tasks organized by user story (see Task Generation Rules below)
- Generate dependency graph showing user story completion order
- Create parallel execution examples per user story
- Validate task completeness (each user story has all needed tasks, independently testable)
4. **Generate tasks.md**: Use `.specify/templates/tasks-template.md` as structure, fill with:
- Correct feature name from plan.md
- Phase 1: Setup tasks (project initialization)
- Phase 2: Foundational tasks (blocking prerequisites for all user stories)
- Phase 3+: One phase per user story (in priority order from spec.md)
- Each phase includes: story goal, independent test criteria, tests (if requested), implementation tasks
- Final Phase: Polish & cross-cutting concerns
- All tasks must follow the strict checklist format (see Task Generation Rules below)
- Clear file paths for each task
- Dependencies section showing story completion order
- Parallel execution examples per story
- Implementation strategy section (MVP first, incremental delivery)
5. **Report**: Output path to generated tasks.md and summary:
- Total task count
- Task count per user story
- Parallel opportunities identified
- Independent test criteria for each story
- Suggested MVP scope (typically just User Story 1)
- Format validation: Confirm ALL tasks follow the checklist format (checkbox, ID, labels, file paths)
Context for task generation: $ARGUMENTS
The tasks.md should be immediately executable - each task must be specific enough that an LLM can complete it without additional context.
## Task Generation Rules
**CRITICAL**: Tasks MUST be organized by user story to enable independent implementation and testing.
**Tests are OPTIONAL**: Only generate test tasks if explicitly requested in the feature specification or if user requests TDD approach.
### Checklist Format (REQUIRED)
Every task MUST strictly follow this format:
```text
- [ ] [TaskID] [P?] [Story?] Description with file path
```
**Format Components**:
1. **Checkbox**: ALWAYS start with `- [ ]` (markdown checkbox)
2. **Task ID**: Sequential number (T001, T002, T003...) in execution order
3. **[P] marker**: Include ONLY if task is parallelizable (different files, no dependencies on incomplete tasks)
4. **[Story] label**: REQUIRED for user story phase tasks only
- Format: [US1], [US2], [US3], etc. (maps to user stories from spec.md)
- Setup phase: NO story label
- Foundational phase: NO story label
- User Story phases: MUST have story label
- Polish phase: NO story label
5. **Description**: Clear action with exact file path
**Examples**:
- ✅ CORRECT: `- [ ] T001 Create project structure per implementation plan`
- ✅ CORRECT: `- [ ] T005 [P] Implement authentication middleware in src/middleware/auth.py`
- ✅ CORRECT: `- [ ] T012 [P] [US1] Create User model in src/models/user.py`
- ✅ CORRECT: `- [ ] T014 [US1] Implement UserService in src/services/user_service.py`
- ❌ WRONG: `- [ ] Create User model` (missing ID and Story label)
- ❌ WRONG: `T001 [US1] Create model` (missing checkbox)
- ❌ WRONG: `- [ ] [US1] Create User model` (missing Task ID)
- ❌ WRONG: `- [ ] T001 [US1] Create model` (missing file path)
### Task Organization
1. **From User Stories (spec.md)** - PRIMARY ORGANIZATION:
- Each user story (P1, P2, P3...) gets its own phase
- Map all related components to their story:
- Models needed for that story
- Services needed for that story
- Endpoints/UI needed for that story
- If tests requested: Tests specific to that story
- Mark story dependencies (most stories should be independent)
2. **From Contracts**:
- Map each contract/endpoint → to the user story it serves
- If tests requested: Each contract → contract test task [P] before implementation in that story's phase
3. **From Data Model**:
- Map each entity to the user story(ies) that need it
- If entity serves multiple stories: Put in earliest story or Setup phase
- Relationships → service layer tasks in appropriate story phase
4. **From Setup/Infrastructure**:
- Shared infrastructure → Setup phase (Phase 1)
- Foundational/blocking tasks → Foundational phase (Phase 2)
- Story-specific setup → within that story's phase
### Phase Structure
- **Phase 1**: Setup (project initialization)
- **Phase 2**: Foundational (blocking prerequisites - MUST complete before user stories)
- **Phase 3+**: User Stories in priority order (P1, P2, P3...)
- Within each story: Tests (if requested) → Models → Services → Endpoints → Integration
- Each phase should be a complete, independently testable increment
- **Final Phase**: Polish & Cross-Cutting Concerns

.dockerignore (new file)

@@ -0,0 +1,93 @@
# Dependencies
node_modules/
**/node_modules/
.pnpm-store/
# Build outputs
dist/
build/
**/dist/
**/build/
*.tsbuildinfo
# Development files
.git/
.gitignore
.vscode/
.idea/
*.swp
*.swo
*~
# Test files
test/
**/test/
**/*.spec.ts
**/*.test.ts
**/*.e2e-spec.ts
coverage/
.nyc_output/
test-results/
playwright-report/
# Documentation
*.md
!README.md
docs/
CHANGELOG.md
CONTRIBUTING.md
DISCLAIMER
LICENSE
# CI/CD
.github/
.gitpod.yml
# Environment files
.env
.env.*
!.env.example
# Logs
*.log
logs/
npm-debug.log*
yarn-debug.log*
yarn-error.log*
pnpm-debug.log*
# OS files
.DS_Store
Thumbs.db
*.pid
*.seed
*.pid.lock
# Docker files (don't copy Dockerfiles into themselves)
docker-compose*.yml
Dockerfile*
.dockerignore
# Misc
.cache/
.temp/
tmp/
*.tmp
.qodo/
e2e/
playwright.config.ts
# Source maps (not needed in production)
*.map
# TypeScript configs (not needed at runtime)
tsconfig*.json
!tsconfig.json
# Linting/formatting
.eslintrc*
.prettierrc*
.eslintcache
# Package manager locks (we copy them explicitly)
# pnpm-lock.yaml


@@ -35,17 +35,10 @@ TENANT_DB_NAME_PERFIX=bigcapital_tenant_
BASE_URL=http://example.com
JWT_SECRET=b0JDZW56RnV6aEthb0RGPXVEcUI
# Jobs MongoDB
MONGODB_DATABASE_URL=mongodb://localhost/bigcapital
# App proxy
PUBLIC_PROXY_PORT=80
PUBLIC_PROXY_SSL_PORT=443
# Agendash
AGENDASH_AUTH_USER=agendash
AGENDASH_AUTH_PASSWORD=123123
# Sign-up restrictions
SIGNUP_DISABLED=false
SIGNUP_ALLOWED_DOMAINS=
@@ -98,8 +91,18 @@ POSTHOG_API_KEY=
POSTHOG_HOST=
# Stripe Payment
# Get the keys from the Stripe dashboard
# Starts with "sk_"
STRIPE_PAYMENT_SECRET_KEY=
# Starts with "pk_"
STRIPE_PAYMENT_PUBLISHABLE_KEY=
# Get the client ID from https://dashboard.stripe.com/settings/connect/onboarding-options/oauth
# Starts with "ca_"
STRIPE_PAYMENT_CLIENT_ID=
# Configure the webhook here: https://dashboard.stripe.com/workbench/webhooks/
# Endpoint URL is https://example.com/api/webhooks/stripe (replace "example.com" with the correct domain)
# Select the "checkout.session.completed" and "account.updated" events
# Starts with "whsec_"
STRIPE_PAYMENT_WEBHOOKS_SECRET=
STRIPE_PAYMENT_REDIRECT_URL=
# Replace example.com with the correct domain
STRIPE_PAYMENT_REDIRECT_URL=https://example.com/preferences/payment-methods/stripe/callback

.github/workflows/typecheck.yml (new file)

@@ -0,0 +1,62 @@
name: TypeCheck
on:
push:
branches:
- main
- develop
paths:
- '**.ts'
- '**.tsx'
- '**/tsconfig.json'
- '**/tsconfig.*.json'
- 'pnpm-lock.yaml'
- 'package.json'
- 'packages/*/package.json'
- 'shared/*/package.json'
- '.github/workflows/typecheck.yml'
pull_request:
paths:
- '**.ts'
- '**.tsx'
- '**/tsconfig.json'
- '**/tsconfig.*.json'
- 'pnpm-lock.yaml'
- 'package.json'
- 'packages/*/package.json'
- 'shared/*/package.json'
- '.github/workflows/typecheck.yml'
defaults:
run:
shell: 'bash'
jobs:
typecheck:
name: TypeScript Type Check
runs-on: ubuntu-latest
timeout-minutes: 10
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Install pnpm
uses: pnpm/action-setup@v4
with:
version: 9
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: '18'
cache: 'pnpm'
- name: Install dependencies
run: pnpm install --frozen-lockfile
- name: Build shared packages
run: pnpm run build --scope "@bigcapital/utils" --scope "@bigcapital/email-components" --scope "@bigcapital/pdf-templates"
- name: Run TypeScript type check
run: pnpm run typecheck


@@ -0,0 +1,140 @@
<!--
Sync Impact Report:
Version change: 1.0.0 → 1.0.0 (initial creation)
Modified principles: N/A (new constitution)
Added sections: Core Principles, Code Quality Standards, Testing Standards, User Experience Standards, Performance Requirements, Development Workflow, Governance
Removed sections: N/A
Templates requiring updates: ✅ plan-template.md (constitution check section), ✅ spec-template.md (no changes needed), ✅ tasks-template.md (no changes needed)
Follow-up TODOs: None
-->
# Bigcapital Constitution
## Core Principles
### I. Code Quality First (NON-NEGOTIABLE)
All code MUST meet minimum quality standards before merge. TypeScript strict mode enabled across all packages. ESLint rules enforced with zero warnings. Code reviews MUST verify readability, maintainability, and adherence to established patterns. Complex logic MUST be documented with clear comments explaining business rules and edge cases.
### II. Test-Driven Development
Test coverage MUST exceed 80% for business logic. Unit tests written BEFORE implementation for new features. Integration tests required for API endpoints and database operations. E2E tests mandatory for critical user journeys. All tests MUST be deterministic and runnable in CI/CD pipeline.
### III. User Experience Consistency
UI components MUST follow Blueprint.js design system patterns. Consistent error handling and loading states across all interfaces. Accessibility standards (WCAG 2.1 AA) mandatory for all user-facing features. Internationalization support required for all user-visible text.
### IV. Performance Requirements
Frontend bundle size MUST not exceed 2MB gzipped. API response times MUST be under 200ms for 95th percentile. Database queries MUST be optimized with proper indexing. Real-time features MUST handle 1000+ concurrent connections without degradation.
### V. Security & Data Integrity
All user data MUST be encrypted in transit and at rest. Authentication tokens MUST use secure, short-lived JWT with refresh mechanism. Input validation MUST prevent SQL injection and XSS attacks. Audit logging required for all financial transactions and user actions.
## Code Quality Standards
### TypeScript & Static Analysis
- Strict mode enabled with `noImplicitAny`, `strictNullChecks`
- ESLint configuration with TypeScript-specific rules
- Prettier formatting enforced across all packages
- Import organization and unused import detection
### Code Organization
- Monorepo structure with clear package boundaries
- Shared utilities in `@bigcapital/utils` package
- Component library in `@bigcapital/email-components` and `@bigcapital/pdf-templates`
- Clear separation between frontend (`webapp`) and backend (`server`) concerns
### Documentation Requirements
- JSDoc comments for all public APIs and complex functions
- README files for each package with setup and usage instructions
- Architecture decisions documented in ADR format
- API documentation generated from code annotations
## Testing Standards
### Unit Testing
- Jest framework for all unit tests
- Minimum 80% code coverage for business logic
- Mock external dependencies (database, APIs, file system)
- Test files co-located with source code (`*.test.ts`, `*.spec.ts`)
### Integration Testing
- API endpoint testing with supertest
- Database integration tests with test database
- Component integration tests with React Testing Library
- Cross-package integration tests for shared utilities
### End-to-End Testing
- Playwright for critical user journeys
- Test data setup and teardown automation
- Visual regression testing for UI components
- Performance testing for key workflows
## User Experience Standards
### Design System Compliance
- Blueprint.js components used consistently across application
- Custom components MUST extend Blueprint.js patterns
- Color palette and typography from established design tokens
- Responsive design for desktop and tablet viewports
### Accessibility Requirements
- WCAG 2.1 AA compliance for all user interfaces
- Keyboard navigation support for all interactive elements
- Screen reader compatibility with proper ARIA labels
- Color contrast ratios meeting accessibility standards
### Internationalization
- All user-facing text MUST use i18n framework
- Date, time, and currency formatting per user locale
- Right-to-left language support where applicable
- Translation keys organized by feature and context
## Performance Requirements
### Frontend Performance
- Initial bundle size under 2MB gzipped
- Code splitting for route-based lazy loading
- Image optimization and lazy loading
- Service worker for offline functionality
### Backend Performance
- API response times under 200ms (95th percentile)
- Database query optimization with proper indexing
- Caching strategy for frequently accessed data
- Connection pooling and resource management
### Scalability Targets
- Support 1000+ concurrent users
- Handle 10,000+ financial transactions per hour
- Real-time updates for 500+ simultaneous connections
- Horizontal scaling capability for high availability
## Development Workflow
### Git & Version Control
- Feature branches from `develop` branch
- Conventional commit messages enforced by commitlint
- Pull request reviews required for all changes
- Automated CI/CD pipeline with quality gates
### Code Review Process
- Minimum 2 approvals for production changes
- Security review for authentication and financial features
- Performance review for database and API changes
- UX review for user interface modifications
### Release Management
- Semantic versioning for all packages
- Automated changelog generation
- Staged deployment (dev → staging → production)
- Rollback capability for critical issues
## Governance
This constitution supersedes all other development practices and MUST be followed by all contributors. Amendments require:
1. Documentation of the proposed change with rationale
2. Review by core maintainers
3. Migration plan for existing code
4. Version bump following semantic versioning rules
All pull requests MUST verify compliance with constitution principles. Complexity beyond these standards MUST be justified with business requirements and approved by technical leads.
**Version**: 1.0.0 | **Ratified**: 2024-12-19 | **Last Amended**: 2024-12-19


@@ -0,0 +1,166 @@
#!/usr/bin/env bash
# Consolidated prerequisite checking script
#
# This script provides unified prerequisite checking for Spec-Driven Development workflow.
# It replaces the functionality previously spread across multiple scripts.
#
# Usage: ./check-prerequisites.sh [OPTIONS]
#
# OPTIONS:
# --json Output in JSON format
# --require-tasks Require tasks.md to exist (for implementation phase)
# --include-tasks Include tasks.md in AVAILABLE_DOCS list
# --paths-only Only output path variables (no validation)
# --help, -h Show help message
#
# OUTPUTS:
# JSON mode: {"FEATURE_DIR":"...", "AVAILABLE_DOCS":["..."]}
# Text mode: FEATURE_DIR:... \n AVAILABLE_DOCS: \n ✓/✗ file.md
# Paths only: REPO_ROOT: ... \n BRANCH: ... \n FEATURE_DIR: ... etc.
set -e
# Parse command line arguments
JSON_MODE=false
REQUIRE_TASKS=false
INCLUDE_TASKS=false
PATHS_ONLY=false
for arg in "$@"; do
case "$arg" in
--json)
JSON_MODE=true
;;
--require-tasks)
REQUIRE_TASKS=true
;;
--include-tasks)
INCLUDE_TASKS=true
;;
--paths-only)
PATHS_ONLY=true
;;
--help|-h)
cat << 'EOF'
Usage: check-prerequisites.sh [OPTIONS]
Consolidated prerequisite checking for Spec-Driven Development workflow.
OPTIONS:
--json Output in JSON format
--require-tasks Require tasks.md to exist (for implementation phase)
--include-tasks Include tasks.md in AVAILABLE_DOCS list
--paths-only Only output path variables (no prerequisite validation)
--help, -h Show this help message
EXAMPLES:
# Check task prerequisites (plan.md required)
./check-prerequisites.sh --json
# Check implementation prerequisites (plan.md + tasks.md required)
./check-prerequisites.sh --json --require-tasks --include-tasks
# Get feature paths only (no validation)
./check-prerequisites.sh --paths-only
EOF
exit 0
;;
*)
echo "ERROR: Unknown option '$arg'. Use --help for usage information." >&2
exit 1
;;
esac
done
# Source common functions
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "$SCRIPT_DIR/common.sh"
# Get feature paths and validate branch
eval $(get_feature_paths)
check_feature_branch "$CURRENT_BRANCH" "$HAS_GIT" || exit 1
# If paths-only mode, output paths and exit (support JSON + paths-only combined)
if $PATHS_ONLY; then
if $JSON_MODE; then
# Minimal JSON paths payload (no validation performed)
printf '{"REPO_ROOT":"%s","BRANCH":"%s","FEATURE_DIR":"%s","FEATURE_SPEC":"%s","IMPL_PLAN":"%s","TASKS":"%s"}\n' \
"$REPO_ROOT" "$CURRENT_BRANCH" "$FEATURE_DIR" "$FEATURE_SPEC" "$IMPL_PLAN" "$TASKS"
else
echo "REPO_ROOT: $REPO_ROOT"
echo "BRANCH: $CURRENT_BRANCH"
echo "FEATURE_DIR: $FEATURE_DIR"
echo "FEATURE_SPEC: $FEATURE_SPEC"
echo "IMPL_PLAN: $IMPL_PLAN"
echo "TASKS: $TASKS"
fi
exit 0
fi
# Validate required directories and files
if [[ ! -d "$FEATURE_DIR" ]]; then
echo "ERROR: Feature directory not found: $FEATURE_DIR" >&2
echo "Run /speckit.specify first to create the feature structure." >&2
exit 1
fi
if [[ ! -f "$IMPL_PLAN" ]]; then
echo "ERROR: plan.md not found in $FEATURE_DIR" >&2
echo "Run /speckit.plan first to create the implementation plan." >&2
exit 1
fi
# Check for tasks.md if required
if $REQUIRE_TASKS && [[ ! -f "$TASKS" ]]; then
echo "ERROR: tasks.md not found in $FEATURE_DIR" >&2
echo "Run /speckit.tasks first to create the task list." >&2
exit 1
fi
# Build list of available documents
docs=()
# Always check these optional docs
[[ -f "$RESEARCH" ]] && docs+=("research.md")
[[ -f "$DATA_MODEL" ]] && docs+=("data-model.md")
# Check contracts directory (only if it exists and has files)
if [[ -d "$CONTRACTS_DIR" ]] && [[ -n "$(ls -A "$CONTRACTS_DIR" 2>/dev/null)" ]]; then
docs+=("contracts/")
fi
[[ -f "$QUICKSTART" ]] && docs+=("quickstart.md")
# Include tasks.md if requested and it exists
if $INCLUDE_TASKS && [[ -f "$TASKS" ]]; then
docs+=("tasks.md")
fi
# Output results
if $JSON_MODE; then
# Build JSON array of documents
if [[ ${#docs[@]} -eq 0 ]]; then
json_docs="[]"
else
json_docs=$(printf '"%s",' "${docs[@]}")
json_docs="[${json_docs%,}]"
fi
printf '{"FEATURE_DIR":"%s","AVAILABLE_DOCS":%s}\n' "$FEATURE_DIR" "$json_docs"
else
# Text output
echo "FEATURE_DIR:$FEATURE_DIR"
echo "AVAILABLE_DOCS:"
# Show status of each potential document
check_file "$RESEARCH" "research.md"
check_file "$DATA_MODEL" "data-model.md"
check_dir "$CONTRACTS_DIR" "contracts/"
check_file "$QUICKSTART" "quickstart.md"
if $INCLUDE_TASKS; then
check_file "$TASKS" "tasks.md"
fi
fi


@@ -0,0 +1,156 @@
#!/usr/bin/env bash
# Common functions and variables for all scripts
# Get repository root, with fallback for non-git repositories
get_repo_root() {
if git rev-parse --show-toplevel >/dev/null 2>&1; then
git rev-parse --show-toplevel
else
# Fall back to script location for non-git repos
local script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
(cd "$script_dir/../../.." && pwd)
fi
}
# Get current branch, with fallback for non-git repositories
get_current_branch() {
# First check if SPECIFY_FEATURE environment variable is set
if [[ -n "${SPECIFY_FEATURE:-}" ]]; then
echo "$SPECIFY_FEATURE"
return
fi
# Then check git if available
if git rev-parse --abbrev-ref HEAD >/dev/null 2>&1; then
git rev-parse --abbrev-ref HEAD
return
fi
# For non-git repos, try to find the latest feature directory
local repo_root=$(get_repo_root)
local specs_dir="$repo_root/specs"
if [[ -d "$specs_dir" ]]; then
local latest_feature=""
local highest=0
for dir in "$specs_dir"/*; do
if [[ -d "$dir" ]]; then
local dirname=$(basename "$dir")
if [[ "$dirname" =~ ^([0-9]{3})- ]]; then
local number=${BASH_REMATCH[1]}
number=$((10#$number))
if [[ "$number" -gt "$highest" ]]; then
highest=$number
latest_feature=$dirname
fi
fi
fi
done
if [[ -n "$latest_feature" ]]; then
echo "$latest_feature"
return
fi
fi
echo "main" # Final fallback
}
# Check if we have git available
has_git() {
git rev-parse --show-toplevel >/dev/null 2>&1
}
check_feature_branch() {
local branch="$1"
local has_git_repo="$2"
# For non-git repos, we can't enforce branch naming but still provide output
if [[ "$has_git_repo" != "true" ]]; then
echo "[specify] Warning: Git repository not detected; skipped branch validation" >&2
return 0
fi
if [[ ! "$branch" =~ ^[0-9]{3}- ]]; then
echo "ERROR: Not on a feature branch. Current branch: $branch" >&2
echo "Feature branches should be named like: 001-feature-name" >&2
return 1
fi
return 0
}
get_feature_dir() { echo "$1/specs/$2"; }
# Find feature directory by numeric prefix instead of exact branch match
# This allows multiple branches to work on the same spec (e.g., 004-fix-bug, 004-add-feature)
find_feature_dir_by_prefix() {
local repo_root="$1"
local branch_name="$2"
local specs_dir="$repo_root/specs"
# Extract numeric prefix from branch (e.g., "004" from "004-whatever")
if [[ ! "$branch_name" =~ ^([0-9]{3})- ]]; then
# If branch doesn't have numeric prefix, fall back to exact match
echo "$specs_dir/$branch_name"
return
fi
local prefix="${BASH_REMATCH[1]}"
# Search for directories in specs/ that start with this prefix
local matches=()
if [[ -d "$specs_dir" ]]; then
for dir in "$specs_dir"/"$prefix"-*; do
if [[ -d "$dir" ]]; then
matches+=("$(basename "$dir")")
fi
done
fi
# Handle results
if [[ ${#matches[@]} -eq 0 ]]; then
# No match found - return the branch name path (will fail later with clear error)
echo "$specs_dir/$branch_name"
elif [[ ${#matches[@]} -eq 1 ]]; then
# Exactly one match - perfect!
echo "$specs_dir/${matches[0]}"
else
# Multiple matches - this shouldn't happen with proper naming convention
echo "ERROR: Multiple spec directories found with prefix '$prefix': ${matches[*]}" >&2
echo "Please ensure only one spec directory exists per numeric prefix." >&2
echo "$specs_dir/$branch_name" # Return something to avoid breaking the script
fi
}
get_feature_paths() {
local repo_root=$(get_repo_root)
local current_branch=$(get_current_branch)
local has_git_repo="false"
if has_git; then
has_git_repo="true"
fi
# Use prefix-based lookup to support multiple branches per spec
local feature_dir=$(find_feature_dir_by_prefix "$repo_root" "$current_branch")
cat <<EOF
REPO_ROOT='$repo_root'
CURRENT_BRANCH='$current_branch'
HAS_GIT='$has_git_repo'
FEATURE_DIR='$feature_dir'
FEATURE_SPEC='$feature_dir/spec.md'
IMPL_PLAN='$feature_dir/plan.md'
TASKS='$feature_dir/tasks.md'
RESEARCH='$feature_dir/research.md'
DATA_MODEL='$feature_dir/data-model.md'
QUICKSTART='$feature_dir/quickstart.md'
CONTRACTS_DIR='$feature_dir/contracts'
EOF
}
check_file() { [[ -f "$1" ]] && echo "✓ $2" || echo "✗ $2"; }
check_dir() { [[ -d "$1" && -n $(ls -A "$1" 2>/dev/null) ]] && echo "✓ $2" || echo "✗ $2"; }

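Taken together, a sibling script consumes these helpers roughly like this (a minimal sketch; the consumer script itself is hypothetical):

```bash
#!/usr/bin/env bash
# Minimal sketch of a script that consumes common.sh.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "$SCRIPT_DIR/common.sh"
eval $(get_feature_paths)   # defines REPO_ROOT, CURRENT_BRANCH, FEATURE_DIR, FEATURE_SPEC, ...
check_feature_branch "$CURRENT_BRANCH" "$HAS_GIT" || exit 1
echo "Working against spec: $FEATURE_SPEC"
# SPECIFY_FEATURE overrides branch detection, which matters for --no-git checkouts:
#   SPECIFY_FEATURE=004-fix-bug ./my-script.sh
```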
View File

@@ -0,0 +1,200 @@
#!/usr/bin/env bash
set -e
JSON_MODE=false
SHORT_NAME=""
ARGS=()
# Positional parameters are 1-indexed; ${!i} expands to the i-th argument
i=1
while [ $i -le $# ]; do
arg="${!i}"
case "$arg" in
--json)
JSON_MODE=true
;;
--short-name)
if [ $((i + 1)) -gt $# ]; then
echo 'Error: --short-name requires a value' >&2
exit 1
fi
i=$((i + 1))
SHORT_NAME="${!i}"
;;
--help|-h)
echo "Usage: $0 [--json] [--short-name <name>] <feature_description>"
echo ""
echo "Options:"
echo " --json Output in JSON format"
echo " --short-name <name> Provide a custom short name (2-4 words) for the branch"
echo " --help, -h Show this help message"
echo ""
echo "Examples:"
echo " $0 'Add user authentication system' --short-name 'user-auth'"
echo " $0 'Implement OAuth2 integration for API'"
exit 0
;;
*)
ARGS+=("$arg")
;;
esac
i=$((i + 1))
done
FEATURE_DESCRIPTION="${ARGS[*]}"
if [ -z "$FEATURE_DESCRIPTION" ]; then
echo "Usage: $0 [--json] [--short-name <name>] <feature_description>" >&2
exit 1
fi
# Function to find the repository root by searching for existing project markers
find_repo_root() {
local dir="$1"
while [ "$dir" != "/" ]; do
if [ -d "$dir/.git" ] || [ -d "$dir/.specify" ]; then
echo "$dir"
return 0
fi
dir="$(dirname "$dir")"
done
return 1
}
# Resolve repository root. Prefer git information when available, but fall back
# to searching for repository markers so the workflow still functions in repositories that
# were initialised with --no-git.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
if git rev-parse --show-toplevel >/dev/null 2>&1; then
REPO_ROOT=$(git rev-parse --show-toplevel)
HAS_GIT=true
else
REPO_ROOT="$(find_repo_root "$SCRIPT_DIR")"
if [ -z "$REPO_ROOT" ]; then
echo "Error: Could not determine repository root. Please run this script from within the repository." >&2
exit 1
fi
HAS_GIT=false
fi
cd "$REPO_ROOT"
SPECS_DIR="$REPO_ROOT/specs"
mkdir -p "$SPECS_DIR"
HIGHEST=0
if [ -d "$SPECS_DIR" ]; then
for dir in "$SPECS_DIR"/*; do
[ -d "$dir" ] || continue
dirname=$(basename "$dir")
number=$(echo "$dirname" | grep -o '^[0-9]\+' || echo "0")
number=$((10#$number))
if [ "$number" -gt "$HIGHEST" ]; then HIGHEST=$number; fi
done
fi
NEXT=$((HIGHEST + 1))
FEATURE_NUM=$(printf "%03d" "$NEXT")
# Function to generate branch name with stop word filtering and length filtering
generate_branch_name() {
local description="$1"
# Common stop words to filter out
local stop_words="^(i|a|an|the|to|for|of|in|on|at|by|with|from|is|are|was|were|be|been|being|have|has|had|do|does|did|will|would|should|could|can|may|might|must|shall|this|that|these|those|my|your|our|their|want|need|add|get|set)$"
# Convert to lowercase and split into words
local clean_name=$(echo "$description" | tr '[:upper:]' '[:lower:]' | sed 's/[^a-z0-9]/ /g')
# Filter words: remove stop words and words shorter than 3 chars (unless they're uppercase acronyms in original)
local meaningful_words=()
for word in $clean_name; do
# Skip empty words
[ -z "$word" ] && continue
# Keep words that are NOT stop words AND (length >= 3 OR are potential acronyms)
if ! echo "$word" | grep -qiE "$stop_words"; then
if [ ${#word} -ge 3 ]; then
meaningful_words+=("$word")
elif echo "$description" | grep -q "\b${word^^}\b"; then
# Keep short words if they appear as uppercase in original (likely acronyms)
meaningful_words+=("$word")
fi
fi
done
# If we have meaningful words, use first 3-4 of them
if [ ${#meaningful_words[@]} -gt 0 ]; then
local max_words=3
if [ ${#meaningful_words[@]} -eq 4 ]; then max_words=4; fi
local result=""
local count=0
for word in "${meaningful_words[@]}"; do
if [ $count -ge $max_words ]; then break; fi
if [ -n "$result" ]; then result="$result-"; fi
result="$result$word"
count=$((count + 1))
done
echo "$result"
else
# Fallback to original logic if no meaningful words found
echo "$description" | tr '[:upper:]' '[:lower:]' | sed 's/[^a-z0-9]/-/g' | sed 's/-\+/-/g' | sed 's/^-//' | sed 's/-$//' | tr '-' '\n' | grep -v '^$' | head -3 | tr '\n' '-' | sed 's/-$//'
fi
}
# Generate branch name
if [ -n "$SHORT_NAME" ]; then
# Use provided short name, just clean it up
BRANCH_SUFFIX=$(echo "$SHORT_NAME" | tr '[:upper:]' '[:lower:]' | sed 's/[^a-z0-9]/-/g' | sed 's/-\+/-/g' | sed 's/^-//' | sed 's/-$//')
else
# Generate from description with smart filtering
BRANCH_SUFFIX=$(generate_branch_name "$FEATURE_DESCRIPTION")
fi
BRANCH_NAME="${FEATURE_NUM}-${BRANCH_SUFFIX}"
# GitHub enforces a 244-byte limit on branch names
# Validate and truncate if necessary
MAX_BRANCH_LENGTH=244
if [ ${#BRANCH_NAME} -gt $MAX_BRANCH_LENGTH ]; then
# Calculate how much we need to trim from suffix
# Account for: feature number (3) + hyphen (1) = 4 chars
MAX_SUFFIX_LENGTH=$((MAX_BRANCH_LENGTH - 4))
# Truncate suffix at word boundary if possible
TRUNCATED_SUFFIX=$(echo "$BRANCH_SUFFIX" | cut -c1-$MAX_SUFFIX_LENGTH)
# Remove trailing hyphen if truncation created one
TRUNCATED_SUFFIX=$(echo "$TRUNCATED_SUFFIX" | sed 's/-$//')
ORIGINAL_BRANCH_NAME="$BRANCH_NAME"
BRANCH_NAME="${FEATURE_NUM}-${TRUNCATED_SUFFIX}"
>&2 echo "[specify] Warning: Branch name exceeded GitHub's 244-byte limit"
>&2 echo "[specify] Original: $ORIGINAL_BRANCH_NAME (${#ORIGINAL_BRANCH_NAME} bytes)"
>&2 echo "[specify] Truncated to: $BRANCH_NAME (${#BRANCH_NAME} bytes)"
fi
if [ "$HAS_GIT" = true ]; then
git checkout -b "$BRANCH_NAME"
else
>&2 echo "[specify] Warning: Git repository not detected; skipped branch creation for $BRANCH_NAME"
fi
FEATURE_DIR="$SPECS_DIR/$BRANCH_NAME"
mkdir -p "$FEATURE_DIR"
TEMPLATE="$REPO_ROOT/.specify/templates/spec-template.md"
SPEC_FILE="$FEATURE_DIR/spec.md"
if [ -f "$TEMPLATE" ]; then cp "$TEMPLATE" "$SPEC_FILE"; else touch "$SPEC_FILE"; fi
# Set the SPECIFY_FEATURE environment variable for the current session
export SPECIFY_FEATURE="$BRANCH_NAME"
if $JSON_MODE; then
printf '{"BRANCH_NAME":"%s","SPEC_FILE":"%s","FEATURE_NUM":"%s"}\n' "$BRANCH_NAME" "$SPEC_FILE" "$FEATURE_NUM"
else
echo "BRANCH_NAME: $BRANCH_NAME"
echo "SPEC_FILE: $SPEC_FILE"
echo "FEATURE_NUM: $FEATURE_NUM"
echo "SPECIFY_FEATURE environment variable set to: $BRANCH_NAME"
fi

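As a worked example of the stop-word filtering (the repository state and script name are assumptions), a description whose only stop word is "Add" keeps its three remaining words as the branch suffix:

```bash
# Hypothetical first feature in an empty specs/ directory.
$ ./create-new-feature.sh --json 'Add user authentication system'
{"BRANCH_NAME":"001-user-authentication-system","SPEC_FILE":"/repo/specs/001-user-authentication-system/spec.md","FEATURE_NUM":"001"}
```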
View File

@@ -0,0 +1,61 @@
#!/usr/bin/env bash
set -e
# Parse command line arguments
JSON_MODE=false
ARGS=()
for arg in "$@"; do
case "$arg" in
--json)
JSON_MODE=true
;;
--help|-h)
echo "Usage: $0 [--json]"
echo " --json Output results in JSON format"
echo " --help Show this help message"
exit 0
;;
*)
ARGS+=("$arg")
;;
esac
done
# Get script directory and load common functions
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "$SCRIPT_DIR/common.sh"
# Get all paths and variables from common functions
eval $(get_feature_paths)
# Check if we're on a proper feature branch (only for git repos)
check_feature_branch "$CURRENT_BRANCH" "$HAS_GIT" || exit 1
# Ensure the feature directory exists
mkdir -p "$FEATURE_DIR"
# Copy plan template if it exists
TEMPLATE="$REPO_ROOT/.specify/templates/plan-template.md"
if [[ -f "$TEMPLATE" ]]; then
cp "$TEMPLATE" "$IMPL_PLAN"
echo "Copied plan template to $IMPL_PLAN"
else
echo "Warning: Plan template not found at $TEMPLATE"
# Create a basic plan file if template doesn't exist
touch "$IMPL_PLAN"
fi
# Output results
if $JSON_MODE; then
printf '{"FEATURE_SPEC":"%s","IMPL_PLAN":"%s","SPECS_DIR":"%s","BRANCH":"%s","HAS_GIT":"%s"}\n' \
"$FEATURE_SPEC" "$IMPL_PLAN" "$FEATURE_DIR" "$CURRENT_BRANCH" "$HAS_GIT"
else
echo "FEATURE_SPEC: $FEATURE_SPEC"
echo "IMPL_PLAN: $IMPL_PLAN"
echo "SPECS_DIR: $FEATURE_DIR"
echo "BRANCH: $CURRENT_BRANCH"
echo "HAS_GIT: $HAS_GIT"
fi

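For reference, a hypothetical `--json` run on the branch from the previous example would emit (paths assumed):

```bash
$ ./setup-plan.sh --json
{"FEATURE_SPEC":"/repo/specs/001-user-authentication-system/spec.md","IMPL_PLAN":"/repo/specs/001-user-authentication-system/plan.md","SPECS_DIR":"/repo/specs/001-user-authentication-system","BRANCH":"001-user-authentication-system","HAS_GIT":"true"}
```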
View File

@@ -0,0 +1,739 @@
#!/usr/bin/env bash
# Update agent context files with information from plan.md
#
# This script maintains AI agent context files by parsing feature specifications
# and updating agent-specific configuration files with project information.
#
# MAIN FUNCTIONS:
# 1. Environment Validation
# - Verifies git repository structure and branch information
# - Checks for required plan.md files and templates
# - Validates file permissions and accessibility
#
# 2. Plan Data Extraction
# - Parses plan.md files to extract project metadata
# - Identifies language/version, frameworks, databases, and project types
# - Handles missing or incomplete specification data gracefully
#
# 3. Agent File Management
# - Creates new agent context files from templates when needed
# - Updates existing agent files with new project information
# - Preserves manual additions and custom configurations
# - Supports multiple AI agent formats and directory structures
#
# 4. Content Generation
# - Generates language-specific build/test commands
# - Creates appropriate project directory structures
# - Updates technology stacks and recent changes sections
# - Maintains consistent formatting and timestamps
#
# 5. Multi-Agent Support
# - Handles agent-specific file paths and naming conventions
# - Supports: Claude, Gemini, Copilot, Cursor, Qwen, opencode, Codex, Windsurf, Kilo Code, Auggie CLI, Roo Code, CodeBuddy CLI, or Amazon Q Developer CLI
# - Can update single agents or all existing agent files
# - Creates default Claude file if no agent files exist
#
# Usage: ./update-agent-context.sh [agent_type]
# Agent types: claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|kilocode|auggie|roo|codebuddy|q
# Leave empty to update all existing agent files
set -e
# Enable strict error handling
set -u
set -o pipefail
#==============================================================================
# Configuration and Global Variables
#==============================================================================
# Get script directory and load common functions
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "$SCRIPT_DIR/common.sh"
# Get all paths and variables from common functions
eval $(get_feature_paths)
NEW_PLAN="$IMPL_PLAN" # Alias for compatibility with existing code
AGENT_TYPE="${1:-}"
# Agent-specific file paths
CLAUDE_FILE="$REPO_ROOT/CLAUDE.md"
GEMINI_FILE="$REPO_ROOT/GEMINI.md"
COPILOT_FILE="$REPO_ROOT/.github/copilot-instructions.md"
CURSOR_FILE="$REPO_ROOT/.cursor/rules/specify-rules.mdc"
QWEN_FILE="$REPO_ROOT/QWEN.md"
AGENTS_FILE="$REPO_ROOT/AGENTS.md"
WINDSURF_FILE="$REPO_ROOT/.windsurf/rules/specify-rules.md"
KILOCODE_FILE="$REPO_ROOT/.kilocode/rules/specify-rules.md"
AUGGIE_FILE="$REPO_ROOT/.augment/rules/specify-rules.md"
ROO_FILE="$REPO_ROOT/.roo/rules/specify-rules.md"
CODEBUDDY_FILE="$REPO_ROOT/CODEBUDDY.md"
Q_FILE="$REPO_ROOT/AGENTS.md"
# Template file
TEMPLATE_FILE="$REPO_ROOT/.specify/templates/agent-file-template.md"
# Global variables for parsed plan data
NEW_LANG=""
NEW_FRAMEWORK=""
NEW_DB=""
NEW_PROJECT_TYPE=""
#==============================================================================
# Utility Functions
#==============================================================================
log_info() {
echo "INFO: $1"
}
log_success() {
echo "$1"
}
log_error() {
echo "ERROR: $1" >&2
}
log_warning() {
echo "WARNING: $1" >&2
}
# Cleanup function for temporary files
cleanup() {
local exit_code=$?
rm -f /tmp/agent_update_*_$$
rm -f /tmp/manual_additions_$$
exit $exit_code
}
# Set up cleanup trap
trap cleanup EXIT INT TERM
#==============================================================================
# Validation Functions
#==============================================================================
validate_environment() {
# Check if we have a current branch/feature (git or non-git)
if [[ -z "$CURRENT_BRANCH" ]]; then
log_error "Unable to determine current feature"
if [[ "$HAS_GIT" == "true" ]]; then
log_info "Make sure you're on a feature branch"
else
log_info "Set SPECIFY_FEATURE environment variable or create a feature first"
fi
exit 1
fi
# Check if plan.md exists
if [[ ! -f "$NEW_PLAN" ]]; then
log_error "No plan.md found at $NEW_PLAN"
log_info "Make sure you're working on a feature with a corresponding spec directory"
if [[ "$HAS_GIT" != "true" ]]; then
log_info "Use: export SPECIFY_FEATURE=your-feature-name or create a new feature first"
fi
exit 1
fi
# Check if template exists (needed for new files)
if [[ ! -f "$TEMPLATE_FILE" ]]; then
log_warning "Template file not found at $TEMPLATE_FILE"
log_warning "Creating new agent files will fail"
fi
}
#==============================================================================
# Plan Parsing Functions
#==============================================================================
extract_plan_field() {
local field_pattern="$1"
local plan_file="$2"
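# Pulls the value from a bold key/value line in plan.md, e.g. '**Language/Version**: Python 3.11',
# while discarding placeholder values such as 'NEEDS CLARIFICATION' and 'N/A'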
grep "^\*\*${field_pattern}\*\*: " "$plan_file" 2>/dev/null | \
head -1 | \
sed "s|^\*\*${field_pattern}\*\*: ||" | \
sed 's/^[ \t]*//;s/[ \t]*$//' | \
grep -v "NEEDS CLARIFICATION" | \
grep -v "^N/A$" || echo ""
}
parse_plan_data() {
local plan_file="$1"
if [[ ! -f "$plan_file" ]]; then
log_error "Plan file not found: $plan_file"
return 1
fi
if [[ ! -r "$plan_file" ]]; then
log_error "Plan file is not readable: $plan_file"
return 1
fi
log_info "Parsing plan data from $plan_file"
NEW_LANG=$(extract_plan_field "Language/Version" "$plan_file")
NEW_FRAMEWORK=$(extract_plan_field "Primary Dependencies" "$plan_file")
NEW_DB=$(extract_plan_field "Storage" "$plan_file")
NEW_PROJECT_TYPE=$(extract_plan_field "Project Type" "$plan_file")
# Log what we found
if [[ -n "$NEW_LANG" ]]; then
log_info "Found language: $NEW_LANG"
else
log_warning "No language information found in plan"
fi
if [[ -n "$NEW_FRAMEWORK" ]]; then
log_info "Found framework: $NEW_FRAMEWORK"
fi
if [[ -n "$NEW_DB" ]] && [[ "$NEW_DB" != "N/A" ]]; then
log_info "Found database: $NEW_DB"
fi
if [[ -n "$NEW_PROJECT_TYPE" ]]; then
log_info "Found project type: $NEW_PROJECT_TYPE"
fi
}
format_technology_stack() {
local lang="$1"
local framework="$2"
local parts=()
# Add non-empty parts
[[ -n "$lang" && "$lang" != "NEEDS CLARIFICATION" ]] && parts+=("$lang")
[[ -n "$framework" && "$framework" != "NEEDS CLARIFICATION" && "$framework" != "N/A" ]] && parts+=("$framework")
# Join with proper formatting
if [[ ${#parts[@]} -eq 0 ]]; then
echo ""
elif [[ ${#parts[@]} -eq 1 ]]; then
echo "${parts[0]}"
else
# Join multiple parts with " + "
local result="${parts[0]}"
for ((i=1; i<${#parts[@]}; i++)); do
result="$result + ${parts[i]}"
done
echo "$result"
fi
}
#==============================================================================
# Template and Content Generation Functions
#==============================================================================
get_project_structure() {
local project_type="$1"
if [[ "$project_type" == *"web"* ]]; then
echo "backend/\\nfrontend/\\ntests/"
else
echo "src/\\ntests/"
fi
}
get_commands_for_language() {
local lang="$1"
case "$lang" in
*"Python"*)
echo "cd src && pytest && ruff check ."
;;
*"Rust"*)
echo "cargo test && cargo clippy"
;;
*"JavaScript"*|*"TypeScript"*)
echo "npm test \&\& npm run lint"
;;
*)
echo "# Add commands for $lang"
;;
esac
}
get_language_conventions() {
local lang="$1"
echo "$lang: Follow standard conventions"
}
create_new_agent_file() {
local target_file="$1"
local temp_file="$2"
local project_name="$3"
local current_date="$4"
if [[ ! -f "$TEMPLATE_FILE" ]]; then
log_error "Template not found at $TEMPLATE_FILE"
return 1
fi
if [[ ! -r "$TEMPLATE_FILE" ]]; then
log_error "Template file is not readable: $TEMPLATE_FILE"
return 1
fi
log_info "Creating new agent context file from template..."
if ! cp "$TEMPLATE_FILE" "$temp_file"; then
log_error "Failed to copy template file"
return 1
fi
# Replace template placeholders
local project_structure
project_structure=$(get_project_structure "$NEW_PROJECT_TYPE")
local commands
commands=$(get_commands_for_language "$NEW_LANG")
local language_conventions
language_conventions=$(get_language_conventions "$NEW_LANG")
# Perform substitutions with error checking using safer approach
# Escape special characters for sed by using a different delimiter or escaping
local escaped_lang=$(printf '%s\n' "$NEW_LANG" | sed 's/[\[\.*^$()+{}|]/\\&/g')
local escaped_framework=$(printf '%s\n' "$NEW_FRAMEWORK" | sed 's/[\[\.*^$()+{}|]/\\&/g')
local escaped_branch=$(printf '%s\n' "$CURRENT_BRANCH" | sed 's/[\[\.*^$()+{}|]/\\&/g')
# Build technology stack and recent change strings conditionally
local tech_stack
if [[ -n "$escaped_lang" && -n "$escaped_framework" ]]; then
tech_stack="- $escaped_lang + $escaped_framework ($escaped_branch)"
elif [[ -n "$escaped_lang" ]]; then
tech_stack="- $escaped_lang ($escaped_branch)"
elif [[ -n "$escaped_framework" ]]; then
tech_stack="- $escaped_framework ($escaped_branch)"
else
tech_stack="- ($escaped_branch)"
fi
local recent_change
if [[ -n "$escaped_lang" && -n "$escaped_framework" ]]; then
recent_change="- $escaped_branch: Added $escaped_lang + $escaped_framework"
elif [[ -n "$escaped_lang" ]]; then
recent_change="- $escaped_branch: Added $escaped_lang"
elif [[ -n "$escaped_framework" ]]; then
recent_change="- $escaped_branch: Added $escaped_framework"
else
recent_change="- $escaped_branch: Added"
fi
local substitutions=(
"s|\[PROJECT NAME\]|$project_name|"
"s|\[DATE\]|$current_date|"
"s|\[EXTRACTED FROM ALL PLAN.MD FILES\]|$tech_stack|"
"s|\[ACTUAL STRUCTURE FROM PLANS\]|$project_structure|g"
"s|\[ONLY COMMANDS FOR ACTIVE TECHNOLOGIES\]|$commands|"
"s|\[LANGUAGE-SPECIFIC, ONLY FOR LANGUAGES IN USE\]|$language_conventions|"
"s|\[LAST 3 FEATURES AND WHAT THEY ADDED\]|$recent_change|"
)
for substitution in "${substitutions[@]}"; do
if ! sed -i.bak -e "$substitution" "$temp_file"; then
log_error "Failed to perform substitution: $substitution"
rm -f "$temp_file" "$temp_file.bak"
return 1
fi
done
# Convert \n sequences to actual newlines
# Note: newline=$(printf '\n') would be empty, since command substitution strips trailing newlines
newline=$'\n'
sed -i.bak2 "s/\\\\n/\\${newline}/g" "$temp_file"
# Clean up backup files
rm -f "$temp_file.bak" "$temp_file.bak2"
return 0
}
update_existing_agent_file() {
local target_file="$1"
local current_date="$2"
log_info "Updating existing agent context file..."
# Use a single temporary file for atomic update
local temp_file
temp_file=$(mktemp) || {
log_error "Failed to create temporary file"
return 1
}
# Process the file in one pass
local tech_stack=$(format_technology_stack "$NEW_LANG" "$NEW_FRAMEWORK")
local new_tech_entries=()
local new_change_entry=""
# Prepare new technology entries
if [[ -n "$tech_stack" ]] && ! grep -q "$tech_stack" "$target_file"; then
new_tech_entries+=("- $tech_stack ($CURRENT_BRANCH)")
fi
if [[ -n "$NEW_DB" ]] && [[ "$NEW_DB" != "N/A" ]] && [[ "$NEW_DB" != "NEEDS CLARIFICATION" ]] && ! grep -q "$NEW_DB" "$target_file"; then
new_tech_entries+=("- $NEW_DB ($CURRENT_BRANCH)")
fi
# Prepare new change entry
if [[ -n "$tech_stack" ]]; then
new_change_entry="- $CURRENT_BRANCH: Added $tech_stack"
elif [[ -n "$NEW_DB" ]] && [[ "$NEW_DB" != "N/A" ]] && [[ "$NEW_DB" != "NEEDS CLARIFICATION" ]]; then
new_change_entry="- $CURRENT_BRANCH: Added $NEW_DB"
fi
# Process file line by line
local in_tech_section=false
local in_changes_section=false
local tech_entries_added=false
local changes_entries_added=false
local existing_changes_count=0
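# The '|| [[ -n "$line" ]]' guard below keeps the loop from dropping a final line without a trailing newline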
while IFS= read -r line || [[ -n "$line" ]]; do
# Handle Active Technologies section
if [[ "$line" == "## Active Technologies" ]]; then
echo "$line" >> "$temp_file"
in_tech_section=true
continue
elif [[ $in_tech_section == true ]] && [[ "$line" =~ ^##[[:space:]] ]]; then
# Add new tech entries before closing the section
if [[ $tech_entries_added == false ]] && [[ ${#new_tech_entries[@]} -gt 0 ]]; then
printf '%s\n' "${new_tech_entries[@]}" >> "$temp_file"
tech_entries_added=true
fi
echo "$line" >> "$temp_file"
in_tech_section=false
continue
elif [[ $in_tech_section == true ]] && [[ -z "$line" ]]; then
# Add new tech entries before empty line in tech section
if [[ $tech_entries_added == false ]] && [[ ${#new_tech_entries[@]} -gt 0 ]]; then
printf '%s\n' "${new_tech_entries[@]}" >> "$temp_file"
tech_entries_added=true
fi
echo "$line" >> "$temp_file"
continue
fi
# Handle Recent Changes section
if [[ "$line" == "## Recent Changes" ]]; then
echo "$line" >> "$temp_file"
# Add new change entry right after the heading
if [[ -n "$new_change_entry" ]]; then
echo "$new_change_entry" >> "$temp_file"
fi
in_changes_section=true
changes_entries_added=true
continue
elif [[ $in_changes_section == true ]] && [[ "$line" =~ ^##[[:space:]] ]]; then
echo "$line" >> "$temp_file"
in_changes_section=false
continue
elif [[ $in_changes_section == true ]] && [[ "$line" == "- "* ]]; then
# Keep only first 2 existing changes
if [[ $existing_changes_count -lt 2 ]]; then
echo "$line" >> "$temp_file"
existing_changes_count=$((existing_changes_count + 1)) # ((x++)) returns non-zero when x is 0, tripping set -e
fi
continue
fi
# Update timestamp
if [[ "$line" =~ \*\*Last\ updated\*\*:.*[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9] ]]; then
echo "$line" | sed "s/[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]/$current_date/" >> "$temp_file"
else
echo "$line" >> "$temp_file"
fi
done < "$target_file"
# Post-loop check: if we're still in the Active Technologies section and haven't added new entries
if [[ $in_tech_section == true ]] && [[ $tech_entries_added == false ]] && [[ ${#new_tech_entries[@]} -gt 0 ]]; then
printf '%s\n' "${new_tech_entries[@]}" >> "$temp_file"
fi
# Move temp file to target atomically
if ! mv "$temp_file" "$target_file"; then
log_error "Failed to update target file"
rm -f "$temp_file"
return 1
fi
return 0
}
#==============================================================================
# Main Agent File Update Function
#==============================================================================
update_agent_file() {
local target_file="$1"
local agent_name="$2"
if [[ -z "$target_file" ]] || [[ -z "$agent_name" ]]; then
log_error "update_agent_file requires target_file and agent_name parameters"
return 1
fi
log_info "Updating $agent_name context file: $target_file"
local project_name
project_name=$(basename "$REPO_ROOT")
local current_date
current_date=$(date +%Y-%m-%d)
# Create directory if it doesn't exist
local target_dir
target_dir=$(dirname "$target_file")
if [[ ! -d "$target_dir" ]]; then
if ! mkdir -p "$target_dir"; then
log_error "Failed to create directory: $target_dir"
return 1
fi
fi
if [[ ! -f "$target_file" ]]; then
# Create new file from template
local temp_file
temp_file=$(mktemp) || {
log_error "Failed to create temporary file"
return 1
}
if create_new_agent_file "$target_file" "$temp_file" "$project_name" "$current_date"; then
if mv "$temp_file" "$target_file"; then
log_success "Created new $agent_name context file"
else
log_error "Failed to move temporary file to $target_file"
rm -f "$temp_file"
return 1
fi
else
log_error "Failed to create new agent file"
rm -f "$temp_file"
return 1
fi
else
# Update existing file
if [[ ! -r "$target_file" ]]; then
log_error "Cannot read existing file: $target_file"
return 1
fi
if [[ ! -w "$target_file" ]]; then
log_error "Cannot write to existing file: $target_file"
return 1
fi
if update_existing_agent_file "$target_file" "$current_date"; then
log_success "Updated existing $agent_name context file"
else
log_error "Failed to update existing agent file"
return 1
fi
fi
return 0
}
#==============================================================================
# Agent Selection and Processing
#==============================================================================
update_specific_agent() {
local agent_type="$1"
case "$agent_type" in
claude)
update_agent_file "$CLAUDE_FILE" "Claude Code"
;;
gemini)
update_agent_file "$GEMINI_FILE" "Gemini CLI"
;;
copilot)
update_agent_file "$COPILOT_FILE" "GitHub Copilot"
;;
cursor-agent)
update_agent_file "$CURSOR_FILE" "Cursor IDE"
;;
qwen)
update_agent_file "$QWEN_FILE" "Qwen Code"
;;
opencode)
update_agent_file "$AGENTS_FILE" "opencode"
;;
codex)
update_agent_file "$AGENTS_FILE" "Codex CLI"
;;
windsurf)
update_agent_file "$WINDSURF_FILE" "Windsurf"
;;
kilocode)
update_agent_file "$KILOCODE_FILE" "Kilo Code"
;;
auggie)
update_agent_file "$AUGGIE_FILE" "Auggie CLI"
;;
roo)
update_agent_file "$ROO_FILE" "Roo Code"
;;
codebuddy)
update_agent_file "$CODEBUDDY_FILE" "CodeBuddy CLI"
;;
q)
update_agent_file "$Q_FILE" "Amazon Q Developer CLI"
;;
*)
log_error "Unknown agent type '$agent_type'"
log_error "Expected: claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|kilocode|auggie|roo|q"
exit 1
;;
esac
}
update_all_existing_agents() {
local found_agent=false
# Check each possible agent file and update if it exists
if [[ -f "$CLAUDE_FILE" ]]; then
update_agent_file "$CLAUDE_FILE" "Claude Code"
found_agent=true
fi
if [[ -f "$GEMINI_FILE" ]]; then
update_agent_file "$GEMINI_FILE" "Gemini CLI"
found_agent=true
fi
if [[ -f "$COPILOT_FILE" ]]; then
update_agent_file "$COPILOT_FILE" "GitHub Copilot"
found_agent=true
fi
if [[ -f "$CURSOR_FILE" ]]; then
update_agent_file "$CURSOR_FILE" "Cursor IDE"
found_agent=true
fi
if [[ -f "$QWEN_FILE" ]]; then
update_agent_file "$QWEN_FILE" "Qwen Code"
found_agent=true
fi
if [[ -f "$AGENTS_FILE" ]]; then
update_agent_file "$AGENTS_FILE" "Codex/opencode"
found_agent=true
fi
if [[ -f "$WINDSURF_FILE" ]]; then
update_agent_file "$WINDSURF_FILE" "Windsurf"
found_agent=true
fi
if [[ -f "$KILOCODE_FILE" ]]; then
update_agent_file "$KILOCODE_FILE" "Kilo Code"
found_agent=true
fi
if [[ -f "$AUGGIE_FILE" ]]; then
update_agent_file "$AUGGIE_FILE" "Auggie CLI"
found_agent=true
fi
if [[ -f "$ROO_FILE" ]]; then
update_agent_file "$ROO_FILE" "Roo Code"
found_agent=true
fi
if [[ -f "$CODEBUDDY_FILE" ]]; then
update_agent_file "$CODEBUDDY_FILE" "CodeBuddy CLI"
found_agent=true
fi
if [[ -f "$Q_FILE" ]]; then
update_agent_file "$Q_FILE" "Amazon Q Developer CLI"
found_agent=true
fi
# If no agent files exist, create a default Claude file
if [[ "$found_agent" == false ]]; then
log_info "No existing agent files found, creating default Claude file..."
update_agent_file "$CLAUDE_FILE" "Claude Code"
fi
}
print_summary() {
echo
log_info "Summary of changes:"
if [[ -n "$NEW_LANG" ]]; then
echo " - Added language: $NEW_LANG"
fi
if [[ -n "$NEW_FRAMEWORK" ]]; then
echo " - Added framework: $NEW_FRAMEWORK"
fi
if [[ -n "$NEW_DB" ]] && [[ "$NEW_DB" != "N/A" ]]; then
echo " - Added database: $NEW_DB"
fi
echo
log_info "Usage: $0 [claude|gemini|copilot|cursor-agent|qwen|opencode|codex|windsurf|kilocode|auggie|codebuddy|q]"
}
#==============================================================================
# Main Execution
#==============================================================================
main() {
# Validate environment before proceeding
validate_environment
log_info "=== Updating agent context files for feature $CURRENT_BRANCH ==="
# Parse the plan file to extract project information
if ! parse_plan_data "$NEW_PLAN"; then
log_error "Failed to parse plan data"
exit 1
fi
# Process based on agent type argument
local success=true
if [[ -z "$AGENT_TYPE" ]]; then
# No specific agent provided - update all existing agent files
log_info "No agent specified, updating all existing agent files..."
if ! update_all_existing_agents; then
success=false
fi
else
# Specific agent provided - update only that agent
log_info "Updating specific agent: $AGENT_TYPE"
if ! update_specific_agent "$AGENT_TYPE"; then
success=false
fi
fi
# Print summary
print_summary
if [[ "$success" == true ]]; then
log_success "Agent context update completed successfully"
exit 0
else
log_error "Agent context update completed with errors"
exit 1
fi
}
# Execute main function if script is run directly
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
main "$@"
fi

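Putting it together, a hypothetical single-agent session (the feature name and plan values are assumptions; the log lines follow the script's own messages) would look like:

```bash
$ ./update-agent-context.sh claude
INFO: === Updating agent context files for feature 001-user-authentication-system ===
INFO: Parsing plan data from /repo/specs/001-user-authentication-system/plan.md
INFO: Found language: Python 3.11
INFO: Updating Claude Code context file: /repo/CLAUDE.md
Created new Claude Code context file
```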
View File

@@ -0,0 +1,23 @@
# [PROJECT NAME] Development Guidelines
Auto-generated from all feature plans. Last updated: [DATE]
## Active Technologies
[EXTRACTED FROM ALL PLAN.MD FILES]
## Project Structure
```
[ACTUAL STRUCTURE FROM PLANS]
```
## Commands
[ONLY COMMANDS FOR ACTIVE TECHNOLOGIES]
## Code Style
[LANGUAGE-SPECIFIC, ONLY FOR LANGUAGES IN USE]
## Recent Changes
[LAST 3 FEATURES AND WHAT THEY ADDED]
<!-- MANUAL ADDITIONS START -->
<!-- MANUAL ADDITIONS END -->

View File

@@ -0,0 +1,41 @@
# [CHECKLIST TYPE] Checklist: [FEATURE NAME]
**Purpose**: [Brief description of what this checklist covers]
**Created**: [DATE]
**Feature**: [Link to spec.md or relevant documentation]
**Note**: This checklist is generated by the `/speckit.checklist` command based on feature context and requirements.
<!--
============================================================================
IMPORTANT: The checklist items below are SAMPLE ITEMS for illustration only.
The /speckit.checklist command MUST replace these with actual items based on:
- User's specific checklist request
- Feature requirements from spec.md
- Technical context from plan.md
- Implementation details from tasks.md
DO NOT keep these sample items in the generated checklist file.
============================================================================
-->
## [Category 1]
- [ ] CHK001 First checklist item with clear action
- [ ] CHK002 Second checklist item
- [ ] CHK003 Third checklist item
## [Category 2]
- [ ] CHK004 Another category item
- [ ] CHK005 Item with specific criteria
- [ ] CHK006 Final item in this category
## Notes
- Check items off as completed: `[x]`
- Add comments or findings inline
- Link to relevant resources or documentation
- Items are numbered sequentially for easy reference

View File

@@ -0,0 +1,105 @@
# Implementation Plan: [FEATURE]
**Branch**: `[###-feature-name]` | **Date**: [DATE] | **Spec**: [link]
**Input**: Feature specification from `/specs/[###-feature-name]/spec.md`
**Note**: This template is filled in by the `/speckit.plan` command. See `.specify/templates/commands/plan.md` for the execution workflow.
## Summary
[Extract from feature spec: primary requirement + technical approach from research]
## Technical Context
<!--
ACTION REQUIRED: Replace the content in this section with the technical details
for the project. The structure here is presented in an advisory capacity to guide
the iteration process.
-->
**Language/Version**: [e.g., Python 3.11, Swift 5.9, Rust 1.75 or NEEDS CLARIFICATION]
**Primary Dependencies**: [e.g., FastAPI, UIKit, LLVM or NEEDS CLARIFICATION]
**Storage**: [if applicable, e.g., PostgreSQL, CoreData, files or N/A]
**Testing**: [e.g., pytest, XCTest, cargo test or NEEDS CLARIFICATION]
**Target Platform**: [e.g., Linux server, iOS 15+, WASM or NEEDS CLARIFICATION]
**Project Type**: [single/web/mobile - determines source structure]
**Performance Goals**: [domain-specific, e.g., 1000 req/s, 10k lines/sec, 60 fps or NEEDS CLARIFICATION]
**Constraints**: [domain-specific, e.g., <200ms p95, <100MB memory, offline-capable or NEEDS CLARIFICATION]
**Scale/Scope**: [domain-specific, e.g., 10k users, 1M LOC, 50 screens or NEEDS CLARIFICATION]
## Constitution Check
*GATE: Must pass before Phase 0 research. Re-check after Phase 1 design.*
[Gates determined based on constitution file]
## Project Structure
### Documentation (this feature)
```
specs/[###-feature]/
├── plan.md # This file (/speckit.plan command output)
├── research.md # Phase 0 output (/speckit.plan command)
├── data-model.md # Phase 1 output (/speckit.plan command)
├── quickstart.md # Phase 1 output (/speckit.plan command)
├── contracts/ # Phase 1 output (/speckit.plan command)
└── tasks.md # Phase 2 output (/speckit.tasks command - NOT created by /speckit.plan)
```
### Source Code (repository root)
<!--
ACTION REQUIRED: Replace the placeholder tree below with the concrete layout
for this feature. Delete unused options and expand the chosen structure with
real paths (e.g., apps/admin, packages/something). The delivered plan must
not include Option labels.
-->
```
# [REMOVE IF UNUSED] Option 1: Single project (DEFAULT)
src/
├── models/
├── services/
├── cli/
└── lib/
tests/
├── contract/
├── integration/
└── unit/
# [REMOVE IF UNUSED] Option 2: Web application (when "frontend" + "backend" detected)
backend/
├── src/
│ ├── models/
│ ├── services/
│ └── api/
└── tests/
frontend/
├── src/
│ ├── components/
│ ├── pages/
│ └── services/
└── tests/
# [REMOVE IF UNUSED] Option 3: Mobile + API (when "iOS/Android" detected)
api/
└── [same as backend above]
ios/ or android/
└── [platform-specific structure: feature modules, UI flows, platform tests]
```
**Structure Decision**: [Document the selected structure and reference the real
directories captured above]
## Complexity Tracking
*Fill ONLY if Constitution Check has violations that must be justified*
| Violation | Why Needed | Simpler Alternative Rejected Because |
|-----------|------------|-------------------------------------|
| [e.g., 4th project] | [current need] | [why 3 projects insufficient] |
| [e.g., Repository pattern] | [specific problem] | [why direct DB access insufficient] |

View File

@@ -0,0 +1,116 @@
# Feature Specification: [FEATURE NAME]
**Feature Branch**: `[###-feature-name]`
**Created**: [DATE]
**Status**: Draft
**Input**: User description: "$ARGUMENTS"
## User Scenarios & Testing *(mandatory)*
<!--
IMPORTANT: User stories should be PRIORITIZED as user journeys ordered by importance.
Each user story/journey must be INDEPENDENTLY TESTABLE - meaning if you implement just ONE of them,
you should still have a viable MVP (Minimum Viable Product) that delivers value.
Assign priorities (P1, P2, P3, etc.) to each story, where P1 is the most critical.
Think of each story as a standalone slice of functionality that can be:
- Developed independently
- Tested independently
- Deployed independently
- Demonstrated to users independently
-->
### User Story 1 - [Brief Title] (Priority: P1)
[Describe this user journey in plain language]
**Why this priority**: [Explain the value and why it has this priority level]
**Independent Test**: [Describe how this can be tested independently - e.g., "Can be fully tested by [specific action] and delivers [specific value]"]
**Acceptance Scenarios**:
1. **Given** [initial state], **When** [action], **Then** [expected outcome]
2. **Given** [initial state], **When** [action], **Then** [expected outcome]
---
### User Story 2 - [Brief Title] (Priority: P2)
[Describe this user journey in plain language]
**Why this priority**: [Explain the value and why it has this priority level]
**Independent Test**: [Describe how this can be tested independently]
**Acceptance Scenarios**:
1. **Given** [initial state], **When** [action], **Then** [expected outcome]
---
### User Story 3 - [Brief Title] (Priority: P3)
[Describe this user journey in plain language]
**Why this priority**: [Explain the value and why it has this priority level]
**Independent Test**: [Describe how this can be tested independently]
**Acceptance Scenarios**:
1. **Given** [initial state], **When** [action], **Then** [expected outcome]
---
[Add more user stories as needed, each with an assigned priority]
### Edge Cases
<!--
ACTION REQUIRED: The content in this section represents placeholders.
Fill them out with the right edge cases.
-->
- What happens when [boundary condition]?
- How does system handle [error scenario]?
## Requirements *(mandatory)*
<!--
ACTION REQUIRED: The content in this section represents placeholders.
Fill them out with the right functional requirements.
-->
### Functional Requirements
- **FR-001**: System MUST [specific capability, e.g., "allow users to create accounts"]
- **FR-002**: System MUST [specific capability, e.g., "validate email addresses"]
- **FR-003**: Users MUST be able to [key interaction, e.g., "reset their password"]
- **FR-004**: System MUST [data requirement, e.g., "persist user preferences"]
- **FR-005**: System MUST [behavior, e.g., "log all security events"]
*Example of marking unclear requirements:*
- **FR-006**: System MUST authenticate users via [NEEDS CLARIFICATION: auth method not specified - email/password, SSO, OAuth?]
- **FR-007**: System MUST retain user data for [NEEDS CLARIFICATION: retention period not specified]
### Key Entities *(include if feature involves data)*
- **[Entity 1]**: [What it represents, key attributes without implementation]
- **[Entity 2]**: [What it represents, relationships to other entities]
## Success Criteria *(mandatory)*
<!--
ACTION REQUIRED: Define measurable success criteria.
These must be technology-agnostic and measurable.
-->
### Measurable Outcomes
- **SC-001**: [Measurable metric, e.g., "Users can complete account creation in under 2 minutes"]
- **SC-002**: [Measurable metric, e.g., "System handles 1000 concurrent users without degradation"]
- **SC-003**: [User satisfaction metric, e.g., "90% of users successfully complete primary task on first attempt"]
- **SC-004**: [Business metric, e.g., "Reduce support tickets related to [X] by 50%"]

View File

@@ -0,0 +1,251 @@
---
description: "Task list template for feature implementation"
---
# Tasks: [FEATURE NAME]
**Input**: Design documents from `/specs/[###-feature-name]/`
**Prerequisites**: plan.md (required), spec.md (required for user stories), research.md, data-model.md, contracts/
**Tests**: The examples below include test tasks. Tests are OPTIONAL - only include them if explicitly requested in the feature specification.
**Organization**: Tasks are grouped by user story to enable independent implementation and testing of each story.
## Format: `[ID] [P?] [Story] Description`
- **[P]**: Can run in parallel (different files, no dependencies)
- **[Story]**: Which user story this task belongs to (e.g., US1, US2, US3)
- Include exact file paths in descriptions
## Path Conventions
- **Single project**: `src/`, `tests/` at repository root
- **Web app**: `backend/src/`, `frontend/src/`
- **Mobile**: `api/src/`, `ios/src/` or `android/src/`
- Paths shown below assume single project - adjust based on plan.md structure
<!--
============================================================================
IMPORTANT: The tasks below are SAMPLE TASKS for illustration purposes only.
The /speckit.tasks command MUST replace these with actual tasks based on:
- User stories from spec.md (with their priorities P1, P2, P3...)
- Feature requirements from plan.md
- Entities from data-model.md
- Endpoints from contracts/
Tasks MUST be organized by user story so each story can be:
- Implemented independently
- Tested independently
- Delivered as an MVP increment
DO NOT keep these sample tasks in the generated tasks.md file.
============================================================================
-->
## Phase 1: Setup (Shared Infrastructure)
**Purpose**: Project initialization and basic structure
- [ ] T001 Create project structure per implementation plan
- [ ] T002 Initialize [language] project with [framework] dependencies
- [ ] T003 [P] Configure linting and formatting tools
---
## Phase 2: Foundational (Blocking Prerequisites)
**Purpose**: Core infrastructure that MUST be complete before ANY user story can be implemented
**⚠️ CRITICAL**: No user story work can begin until this phase is complete
Examples of foundational tasks (adjust based on your project):
- [ ] T004 Setup database schema and migrations framework
- [ ] T005 [P] Implement authentication/authorization framework
- [ ] T006 [P] Setup API routing and middleware structure
- [ ] T007 Create base models/entities that all stories depend on
- [ ] T008 Configure error handling and logging infrastructure
- [ ] T009 Setup environment configuration management
**Checkpoint**: Foundation ready - user story implementation can now begin in parallel
---
## Phase 3: User Story 1 - [Title] (Priority: P1) 🎯 MVP
**Goal**: [Brief description of what this story delivers]
**Independent Test**: [How to verify this story works on its own]
### Tests for User Story 1 (OPTIONAL - only if tests requested) ⚠️
**NOTE: Write these tests FIRST, ensure they FAIL before implementation**
- [ ] T010 [P] [US1] Contract test for [endpoint] in tests/contract/test_[name].py
- [ ] T011 [P] [US1] Integration test for [user journey] in tests/integration/test_[name].py
### Implementation for User Story 1
- [ ] T012 [P] [US1] Create [Entity1] model in src/models/[entity1].py
- [ ] T013 [P] [US1] Create [Entity2] model in src/models/[entity2].py
- [ ] T014 [US1] Implement [Service] in src/services/[service].py (depends on T012, T013)
- [ ] T015 [US1] Implement [endpoint/feature] in src/[location]/[file].py
- [ ] T016 [US1] Add validation and error handling
- [ ] T017 [US1] Add logging for user story 1 operations
**Checkpoint**: At this point, User Story 1 should be fully functional and testable independently
---
## Phase 4: User Story 2 - [Title] (Priority: P2)
**Goal**: [Brief description of what this story delivers]
**Independent Test**: [How to verify this story works on its own]
### Tests for User Story 2 (OPTIONAL - only if tests requested) ⚠️
- [ ] T018 [P] [US2] Contract test for [endpoint] in tests/contract/test_[name].py
- [ ] T019 [P] [US2] Integration test for [user journey] in tests/integration/test_[name].py
### Implementation for User Story 2
- [ ] T020 [P] [US2] Create [Entity] model in src/models/[entity].py
- [ ] T021 [US2] Implement [Service] in src/services/[service].py
- [ ] T022 [US2] Implement [endpoint/feature] in src/[location]/[file].py
- [ ] T023 [US2] Integrate with User Story 1 components (if needed)
**Checkpoint**: At this point, User Stories 1 AND 2 should both work independently
---
## Phase 5: User Story 3 - [Title] (Priority: P3)
**Goal**: [Brief description of what this story delivers]
**Independent Test**: [How to verify this story works on its own]
### Tests for User Story 3 (OPTIONAL - only if tests requested) ⚠️
- [ ] T024 [P] [US3] Contract test for [endpoint] in tests/contract/test_[name].py
- [ ] T025 [P] [US3] Integration test for [user journey] in tests/integration/test_[name].py
### Implementation for User Story 3
- [ ] T026 [P] [US3] Create [Entity] model in src/models/[entity].py
- [ ] T027 [US3] Implement [Service] in src/services/[service].py
- [ ] T028 [US3] Implement [endpoint/feature] in src/[location]/[file].py
**Checkpoint**: All user stories should now be independently functional
---
[Add more user story phases as needed, following the same pattern]
---
## Phase N: Polish & Cross-Cutting Concerns
**Purpose**: Improvements that affect multiple user stories
- [ ] TXXX [P] Documentation updates in docs/
- [ ] TXXX Code cleanup and refactoring
- [ ] TXXX Performance optimization across all stories
- [ ] TXXX [P] Additional unit tests (if requested) in tests/unit/
- [ ] TXXX Security hardening
- [ ] TXXX Run quickstart.md validation
---
## Dependencies & Execution Order
### Phase Dependencies
- **Setup (Phase 1)**: No dependencies - can start immediately
- **Foundational (Phase 2)**: Depends on Setup completion - BLOCKS all user stories
- **User Stories (Phase 3+)**: All depend on Foundational phase completion
- User stories can then proceed in parallel (if staffed)
- Or sequentially in priority order (P1 → P2 → P3)
- **Polish (Final Phase)**: Depends on all desired user stories being complete
### User Story Dependencies
- **User Story 1 (P1)**: Can start after Foundational (Phase 2) - No dependencies on other stories
- **User Story 2 (P2)**: Can start after Foundational (Phase 2) - May integrate with US1 but should be independently testable
- **User Story 3 (P3)**: Can start after Foundational (Phase 2) - May integrate with US1/US2 but should be independently testable
### Within Each User Story
- Tests (if included) MUST be written and FAIL before implementation
- Models before services
- Services before endpoints
- Core implementation before integration
- Story complete before moving to next priority
### Parallel Opportunities
- All Setup tasks marked [P] can run in parallel
- All Foundational tasks marked [P] can run in parallel (within Phase 2)
- Once Foundational phase completes, all user stories can start in parallel (if team capacity allows)
- All tests for a user story marked [P] can run in parallel
- Models within a story marked [P] can run in parallel
- Different user stories can be worked on in parallel by different team members
---
## Parallel Example: User Story 1
```bash
# Launch all tests for User Story 1 together (if tests requested):
Task: "Contract test for [endpoint] in tests/contract/test_[name].py"
Task: "Integration test for [user journey] in tests/integration/test_[name].py"
# Launch all models for User Story 1 together:
Task: "Create [Entity1] model in src/models/[entity1].py"
Task: "Create [Entity2] model in src/models/[entity2].py"
```
---
## Implementation Strategy
### MVP First (User Story 1 Only)
1. Complete Phase 1: Setup
2. Complete Phase 2: Foundational (CRITICAL - blocks all stories)
3. Complete Phase 3: User Story 1
4. **STOP and VALIDATE**: Test User Story 1 independently
5. Deploy/demo if ready
### Incremental Delivery
1. Complete Setup + Foundational → Foundation ready
2. Add User Story 1 → Test independently → Deploy/Demo (MVP!)
3. Add User Story 2 → Test independently → Deploy/Demo
4. Add User Story 3 → Test independently → Deploy/Demo
5. Each story adds value without breaking previous stories
### Parallel Team Strategy
With multiple developers:
1. Team completes Setup + Foundational together
2. Once Foundational is done:
- Developer A: User Story 1
- Developer B: User Story 2
- Developer C: User Story 3
3. Stories complete and integrate independently
---
## Notes
- [P] tasks = different files, no dependencies
- [Story] label maps task to specific user story for traceability
- Each user story should be independently completable and testable
- Verify tests fail before implementing
- Commit after each task or logical group
- Stop at any checkpoint to validate story independently
- Avoid: vague tasks, same file conflicts, cross-story dependencies that break independence

View File

@@ -54,7 +54,7 @@ pnpm install
- Run all required docker containers in the development, we already configured all containers under `docker-compose.yml`.
```
docker-compose up -d
docker compose up -d
```
Wait some seconds, and hit `docker-compose ps` and you should see the same result below.
@@ -75,7 +75,7 @@ pnpm run build:server
- Run the database migration for system database.
```
node packages/server/build/commands.js system:migrate:latest
pnpm run system:migrate:latest
```
And you should get something like that.
@@ -84,10 +84,10 @@ And you should get something like that.
Batch 1 run: 6 migrations
```
- Next, start the webapp application.
- Next, start the server.
```
pnpm run dev:server
pnpm run server:start
```
**[`^top^`](#)**
@@ -96,12 +96,6 @@ pnpm run dev:server
## Contribute to Frontend
- Clone the `bigcapital` repository and cd into `bigcapital` directory.
```
git clone https://github.com/bigcapital/bigcapital.git && cd bigcaptial
```
- Install all npm dependencies of the monorepo; you don't have to change directory to the `frontend` package. Just run the command below and it will install the packages of every application.
```
@@ -138,4 +132,4 @@ There are many other ways to get involved with the community and to participate
Again, Feel free to ping us on [`#contributing`](https://discord.com/invite/c8nPBJafeb) on our Discord community if you need any help on this :)
Thank You!
Thank You!

View File

@@ -34,8 +34,6 @@
</p>
</p>
> We are currently in the process of migrating all server-side API endpoints to NestJS to establish a more solid architecture. Some endpoints in development mode may be temporarily do not work during this stabilization phase. However, this migration doesn't affect the production Docker images, which remain on the latest stable version.
# What's Bigcapital?
Bigcapital is smart, open-source accounting and inventory software. Bigcapital keeps all business finances in the right place and automates accounting processes to give the business powerful and intelligent financial statements and reports to help in making decisions.
@@ -80,6 +78,7 @@ You can integrate Bigcapital API with your system to organize your transactions
# Resources
- [Documentation](https://docs.bigcapital.app/) - Learn how to use.
- [API Reference](https://docs.bigcapital.app/api-reference) - API reference docs
- [Contribution](https://github.com/bigcapitalhq/bigcapital/blob/develop/CONTRIBUTING.md) - Welcome to any contributions.
- [Discord](https://discord.com/invite/c8nPBJafeb) - Ask for help.
- [Bug Tracker](https://github.com/bigcapitalhq/bigcapital/issues) - Notify us new bugs.
@@ -136,6 +135,7 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d
<td align="center" valign="top" width="14.28%"><a href="https://myself.vercel.app/"><img src="https://avatars.githubusercontent.com/u/42431274?v=4?s=100" width="100px;" alt="Sachin Mittal"/><br /><sub><b>Sachin Mittal</b></sub></a><br /><a href="https://github.com/bigcapitalhq/bigcapital/issues?q=author%3Amittalsam98" title="Bug reports">🐛</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://www.camilooviedo.com/"><img src="https://avatars.githubusercontent.com/u/64604272?v=4?s=100" width="100px;" alt="Camilo Oviedo"/><br /><sub><b>Camilo Oviedo</b></sub></a><br /><a href="https://github.com/bigcapitalhq/bigcapital/commits?author=Champetaman" title="Code">💻</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://nklmantey.com/"><img src="https://avatars.githubusercontent.com/u/90279429?v=4?s=100" width="100px;" alt="Mantey"/><br /><sub><b>Mantey</b></sub></a><br /><a href="https://github.com/bigcapitalhq/bigcapital/issues?q=author%3Anklmantey" title="Bug reports">🐛</a></td>
<td align="center" valign="top" width="14.28%"><a href="https://d.sb/"><img src="https://avatars.githubusercontent.com/u/91933?v=4?s=100" width="100px;" alt="Daniel Lo Nigro"/><br /><sub><b>Daniel Lo Nigro</b></sub></a><br /><a href="https://github.com/bigcapitalhq/bigcapital/issues?q=author%3ADaniel15" title="Bug reports">🐛</a> <a href="https://github.com/bigcapitalhq/bigcapital/commits?author=Daniel15" title="Code">💻</a></td>
</tr>
</tbody>
</table>

View File

@@ -32,11 +32,9 @@ services:
- '3000'
links:
- mysql
- mongo
- redis
depends_on:
- mysql
- mongo
- redis
restart: on-failure
networks:
@@ -60,22 +58,21 @@ services:
# System database
- SYSTEM_DB_NAME=${SYSTEM_DB_NAME}
# Redis
- REDIS_HOST=redis
- REDIS_PORT=6379
- QUEUE_HOST=redis
- QUEUE_PORT=6379
# Tenants databases
- TENANT_DB_NAME_PERFIX=${TENANT_DB_NAME_PERFIX}
# Authentication
- JWT_SECRET=${JWT_SECRET}
# MongoDB
- MONGODB_DATABASE_URL=mongodb://mongo/bigcapital
# Application
- BASE_URL=${BASE_URL}
# Agendash
- AGENDASH_AUTH_USER=${AGENDASH_AUTH_USER}
- AGENDASH_AUTH_PASSWORD=${AGENDASH_AUTH_PASSWORD}
# Sign-up restrictions
- SIGNUP_DISABLED=${SIGNUP_DISABLED}
- SIGNUP_ALLOWED_DOMAINS=${SIGNUP_ALLOWED_DOMAINS}
@@ -93,7 +90,7 @@ services:
- OPEN_EXCHANGE_RATE_APP_ID=${OPEN_EXCHANGE_RATE_APP_ID}
# Bank Sync
- BANKING_CONNECT=${BANKING_CONNECT}
- BANK_FEED_ENABLED=${BANK_FEED_ENABLED}
# Plaid
- PLAID_ENV=${PLAID_ENV}
@@ -123,6 +120,13 @@ services:
- S3_ENDPOINT=${S3_ENDPOINT}
- S3_BUCKET=${S3_BUCKET}
# Stripe
- STRIPE_PAYMENT_SECRET_KEY=${STRIPE_PAYMENT_SECRET_KEY}
- STRIPE_PAYMENT_PUBLISHABLE_KEY=${STRIPE_PAYMENT_PUBLISHABLE_KEY}
- STRIPE_PAYMENT_CLIENT_ID=${STRIPE_PAYMENT_CLIENT_ID}
- STRIPE_PAYMENT_WEBHOOKS_SECRET=${STRIPE_PAYMENT_WEBHOOKS_SECRET}
- STRIPE_PAYMENT_REDIRECT_URL=${STRIPE_PAYMENT_REDIRECT_URL}
database_migration:
container_name: bigcapital-database-migration
build:
@@ -159,17 +163,6 @@ services:
networks:
- bigcapital_network
mongo:
container_name: bigcapital-mongo
restart: on-failure
build: ./docker/mongo
expose:
- '27017'
volumes:
- mongo:/var/lib/mongodb
networks:
- bigcapital_network
redis:
container_name: bigcapital-redis
restart: on-failure
@@ -195,10 +188,6 @@ volumes:
name: bigcapital_prod_mysql
driver: local
mongo:
name: bigcapital_prod_mongo
driver: local
redis:
name: bigcapital_prod_redis
driver: local

View File

@@ -24,25 +24,13 @@ services:
restart_policy:
condition: unless-stopped
mongo:
build: ./docker/mongo
expose:
- '27017'
volumes:
- mongo:/var/lib/mongodb
ports:
- '27017:27017'
deploy:
restart_policy:
condition: unless-stopped
redis:
build:
context: ./docker/redis
expose:
- "6379"
- '6379'
ports:
- "6379:6379"
- '6379:6379'
volumes:
- redis:/data
deploy:
@@ -52,7 +40,7 @@ services:
gotenberg:
image: gotenberg/gotenberg:7
ports:
- "9000:3000"
- '9000:3000'
# Volumes
volumes:
@@ -60,10 +48,6 @@ volumes:
name: bigcapital_dev_mysql
driver: local
mongo:
name: bigcapital_dev_mongo
driver: local
redis:
name: bigcapital_dev_redis
driver: local
driver: local

View File

@@ -35,4 +35,4 @@ WORKDIR /app/packages/server
RUN git clone https://github.com/vishnubob/wait-for-it.git
# Once we listen the mysql port run the migration task.
CMD ./wait-for-it/wait-for-it.sh mysql:3306 -- sh -c "node ./build/commands.js system:migrate:latest && node ./build/commands.js tenants:migrate:latest"
CMD ./wait-for-it/wait-for-it.sh mysql:3306 -- sh -c "node dist/cli.js system:migrate:latest && node dist/cli.js tenants:migrate:latest"

View File

@@ -1 +0,0 @@
FROM mongo:5.0

View File

@@ -1,4 +1,4 @@
FROM redis:6.2.0
FROM redis:6.2.21
COPY redis.conf /usr/local/etc/redis/redis.conf

View File

@@ -4,16 +4,25 @@
"scripts": {
"dev": "lerna run dev",
"build": "lerna run build",
"typecheck": "lerna run typecheck",
"dev:webapp": "lerna run dev --scope \"@bigcapital/webapp\" --scope \"@bigcapital/utils\" --scope \"@bigcapital/pdf-templates\"",
"build:webapp": "lerna run build --scope \"@bigcapital/webapp\" --scope \"@bigcapital/utils\" --scope \"@bigcapital/pdf-templates\"",
"dev:server": "lerna run dev --scope \"@bigcapital/server\" --scope \"@bigcapital/utils\" --scope \"@bigcapital/pdf-templates\" --scope \"@bigcapital/email-components\"",
"build:server": "lerna run build --scope \"@bigcapital/server\" --scope \"@bigcapital/utils\" --scope \"@bigcapital/pdf-templates\" --scope \"@bigcapital/email-components\"",
"serve:server": "lerna run serve --scope \"@bigcapital/server\" --scope \"@bigcapital/utils\"",
"server2:start": "lerna run start:dev --scope \"@bigcapital/server2\"",
"server:start": "lerna run start:dev --scope \"@bigcapital/server\"",
"test:watch": "lerna run test:watch",
"test:e2e": "lerna run test:e2e",
"start:debug": "lerna run start:debug",
"prepare": "husky install"
"prepare": "husky install",
"system:migrate:make": "lerna run cli:system:migrate:make --scope \"@bigcapital/server\"",
"tenants:migrate:make": "lerna run cli:tenants:migrate:make --scope \"@bigcapital/server\"",
"system:migrate:rollback": "lerna run cli:system:migrate:rollback --scope \"@bigcapital/server\"",
"tenants:migrate:rollback": "lerna run cli:tenants:migrate:rollback --scope \"@bigcapital/server\"",
"system:migrate:latest": "lerna run cli:system:migrate:latest --scope \"@bigcapital/server\"",
"tenants:migrate:latest": "lerna run cli:tenants:migrate:latest --scope \"@bigcapital/server\"",
"system:seed:latest": "lerna run cli:system:seed:latest --scope \"@bigcapital/server\"",
"tenants:seed:latest": "lerna run cli:tenants:seed:latest --scope \"@bigcapital/server\""
},
"devDependencies": {
"@commitlint/cli": "^17.4.2",

View File

@@ -35,17 +35,10 @@ TENANT_DB_NAME_PERFIX=bigcapital_tenant_
BASE_URL=http://example.com
JWT_SECRET=b0JDZW56RnV6aEthb0RGPXVEcUI
# Jobs MongoDB
MONGODB_DATABASE_URL=mongodb://localhost/bigcapital
# App proxy
PUBLIC_PROXY_PORT=80
PUBLIC_PROXY_SSL_PORT=443
# Agendash
AGENDASH_AUTH_USER=agendash
AGENDASH_AUTH_PASSWORD=123123
# Sign-up restrictions
SIGNUP_DISABLED=false
SIGNUP_ALLOWED_DOMAINS=

packages/server/Dockerfile Normal file
View File

@@ -0,0 +1,102 @@
# Stage 1: Build
FROM node:18.16.0-alpine AS builder
WORKDIR /app
# Install pnpm
RUN npm install -g pnpm@8.10.2
# Install build dependencies
RUN apk add --no-cache python3 build-base chromium
# Set Python environment
ENV PYTHON=/usr/bin/python3
# Copy package files for dependency installation
COPY --chown=node:node package.json pnpm-lock.yaml pnpm-workspace.yaml lerna.json ./
COPY --chown=node:node packages/server/package.json ./packages/server/
COPY --chown=node:node shared/bigcapital-utils/package.json ./shared/bigcapital-utils/
COPY --chown=node:node shared/pdf-templates/package.json ./shared/pdf-templates/
COPY --chown=node:node shared/email-components/package.json ./shared/email-components/
# Install all dependencies (including devDependencies for build)
RUN pnpm install --frozen-lockfile
# Copy source code
COPY --chown=node:node ./packages/server ./packages/server
COPY --chown=node:node ./shared/bigcapital-utils ./shared/bigcapital-utils
COPY --chown=node:node ./shared/pdf-templates ./shared/pdf-templates
COPY --chown=node:node ./shared/email-components ./shared/email-components
# Build NestJS application
RUN pnpm run build:server --skip-nx-cache
# Stage 2: Production
FROM node:18.16.0-alpine AS production
WORKDIR /app
# Install pnpm for production
RUN npm install -g pnpm@8.10.2
# Create non-root user
RUN addgroup -g 1001 -S nodejs && \
adduser -S nodejs -u 1001
# Install build dependencies for native modules (bcrypt, etc.)
RUN apk add --no-cache python3 build-base
# Set Python environment
ENV PYTHON=/usr/bin/python3
# Copy package files for production dependency installation
COPY --chown=nodejs:nodejs package.json pnpm-lock.yaml pnpm-workspace.yaml ./
COPY --chown=nodejs:nodejs packages/server/package.json ./packages/server/
COPY --chown=nodejs:nodejs shared/bigcapital-utils/package.json ./shared/bigcapital-utils/
COPY --chown=nodejs:nodejs shared/pdf-templates/package.json ./shared/pdf-templates/
COPY --chown=nodejs:nodejs shared/email-components/package.json ./shared/email-components/
# Copy .husky directory (needed for husky install command)
COPY --chown=nodejs:nodejs .husky ./.husky
# Install only production dependencies
# Install husky temporarily so prepare script can run, then remove it
RUN pnpm add -D -w husky && \
pnpm install --prod --frozen-lockfile && \
pnpm remove -w husky && \
# Remove build dependencies to reduce image size
apk del python3 build-base
# Copy built application from builder stage
COPY --from=builder --chown=nodejs:nodejs /app/packages/server/dist ./packages/server/dist
# Copy static assets (i18n, public, static directories)
COPY --from=builder --chown=nodejs:nodejs /app/packages/server/src/i18n ./packages/server/dist/i18n
COPY --from=builder --chown=nodejs:nodejs /app/packages/server/public ./packages/server/public
COPY --from=builder --chown=nodejs:nodejs /app/packages/server/static ./packages/server/static
# Copy database migration files (needed for running migrations)
COPY --from=builder --chown=nodejs:nodejs /app/packages/server/src/database ./packages/server/src/database
# Copy built shared packages (dist folders and package.json for module resolution)
COPY --from=builder --chown=nodejs:nodejs /app/shared/bigcapital-utils/dist ./shared/bigcapital-utils/dist
COPY --from=builder --chown=nodejs:nodejs /app/shared/pdf-templates/dist ./shared/pdf-templates/dist
COPY --from=builder --chown=nodejs:nodejs /app/shared/email-components/dist ./shared/email-components/dist
# Set runtime environment variables (these should be provided at runtime via docker-compose or k8s)
ENV NODE_ENV=production
ENV NEW_RELIC_NO_CONFIG_FILE=true
ENV PORT=3000
# Switch to non-root user
USER nodejs
# Expose port
EXPOSE 3000
# Health check - uses /api/system_db ping endpoint
HEALTHCHECK --interval=30s --timeout=3s --start-period=40s --retries=3 \
CMD node -e "require('http').get('http://localhost:3000/api/system_db', (r) => {process.exit(r.statusCode >= 200 && r.statusCode < 300 ? 0 : 1)}).on('error', () => process.exit(1))"
# Start the application
CMD [ "node", "packages/server/dist/main.js" ]

View File

@@ -2,10 +2,23 @@
"$schema": "https://json.schemastore.org/nest-cli",
"collection": "@nestjs/schematics",
"sourceRoot": "src",
"entryFile": "main",
"compilerOptions": {
"deleteOutDir": true,
"assets": [
{ "include": "i18n/**/*", "watchAssets": true }
{ "include": "i18n/**/*", "watchAssets": true },
{ "include": "database/**/*", "exclude": "**/*.ts", "watchAssets": true }
]
},
"projects": {
"cli": {
"type": "application",
"root": "src",
"entryFile": "cli",
"sourceRoot": "src",
"compilerOptions": {
"tsConfigPath": "tsconfig.json"
}
}
}
}

View File

@@ -1,5 +1,5 @@
{
"name": "@bigcapital/server2",
"name": "@bigcapital/server",
"version": "0.0.1",
"description": "",
"author": "",
@@ -13,11 +13,22 @@
"start:debug": "nest start --debug --watch",
"start:prod": "node dist/main",
"lint": "eslint \"{src,apps,libs,test}/**/*.ts\" --fix",
"typecheck": "tsc --noEmit",
"test": "jest",
"test:watch": "jest --watch",
"test:cov": "jest --coverage",
"test:debug": "node --inspect-brk -r tsconfig-paths/register -r ts-node/register node_modules/.bin/jest --runInBand",
"test:e2e": "jest --config ./test/jest-e2e.json --watchAll"
"test:e2e": "jest --config ./test/jest-e2e.json --watchAll",
"cli": "ts-node -r tsconfig-paths/register src/cli.ts",
"cli:system:migrate:latest": "ts-node -r tsconfig-paths/register src/cli.ts system:migrate:latest",
"cli:system:migrate:rollback": "ts-node -r tsconfig-paths/register src/cli.ts system:migrate:rollback",
"cli:system:migrate:make": "ts-node -r tsconfig-paths/register src/cli.ts system:migrate:make",
"cli:tenants:migrate:latest": "ts-node -r tsconfig-paths/register src/cli.ts tenants:migrate:latest",
"cli:tenants:migrate:rollback": "ts-node -r tsconfig-paths/register src/cli.ts tenants:migrate:rollback",
"cli:tenants:migrate:make": "ts-node -r tsconfig-paths/register src/cli.ts tenants:migrate:make",
"cli:tenants:list": "ts-node -r tsconfig-paths/register src/cli.ts tenants:list",
"cli:system:seed:latest": "ts-node -r tsconfig-paths/register src/cli.ts system:seed:latest",
"cli:tenants:seed:latest": "ts-node -r tsconfig-paths/register src/cli.ts tenants:seed:latest"
},
"dependencies": {
"@aws-sdk/client-s3": "^3.576.0",
@@ -28,6 +39,10 @@
"@casl/ability": "^5.4.3",
"@lemonsqueezy/lemonsqueezy.js": "^2.2.0",
"@liaoliaots/nestjs-redis": "^10.0.0",
"@nest-lab/throttler-storage-redis": "^1.1.0",
"@bull-board/api": "^5.22.0",
"@bull-board/express": "^5.22.0",
"@bull-board/nestjs": "^5.22.0",
"@nestjs/bull": "^10.2.1",
"@nestjs/bullmq": "^10.2.2",
"@nestjs/cache-manager": "^2.2.2",
@@ -38,12 +53,16 @@
"@nestjs/jwt": "^10.2.0",
"@nestjs/passport": "^11.0.5",
"@nestjs/platform-express": "^10.0.0",
"@nestjs/platform-socket.io": "^10.0.0",
"@nestjs/schedule": "^4.1.2",
"@nestjs/serve-static": "^5.0.3",
"@nestjs/swagger": "^7.4.2",
"@nestjs/throttler": "^6.2.1",
"@nestjs/websockets": "^10.0.0",
"@supercharge/promise-pool": "^3.2.0",
"@types/multer": "^1.4.11",
"@types/nodemailer": "^6.4.17",
"@types/passport-google-oauth20": "^2.0.16",
"@types/passport-local": "^1.0.38",
"@types/ramda": "^0.30.2",
"accounting": "^0.4.1",
@@ -79,6 +98,7 @@
"multer-s3": "^3.0.1",
"mysql": "^2.18.1",
"mysql2": "^3.11.3",
"nest-commander": "^3.20.1",
"nestjs-cls": "^5.2.0",
"nestjs-i18n": "^10.4.9",
"nestjs-redis": "^1.3.3",
@@ -86,6 +106,8 @@
"object-hash": "^2.0.3",
"objection": "^3.1.5",
"passport": "^0.7.0",
"passport-google-oauth20": "^2.0.0",
"passport-headerapikey": "^1.2.2",
"passport-jwt": "^4.0.1",
"passport-local": "^1.0.0",
"plaid": "^10.3.0",
@@ -98,6 +120,7 @@
"remeda": "^2.19.2",
"rxjs": "^7.8.1",
"serialize-interceptor": "^1.1.7",
"socket.io": "^4.8.1",
"strategy": "^1.1.1",
"stripe": "^16.10.0",
"uniqid": "^5.2.0",
@@ -147,6 +170,9 @@
"**/*.(t|j)s"
],
"coverageDirectory": "../coverage",
"testEnvironment": "node"
"testEnvironment": "node",
"moduleNameMapper": {
"^@/(.*)$": "<rootDir>/$1"
}
}
}

0
packages/server/public/pdf/.gitignore vendored Normal file
View File

View File

@@ -0,0 +1,8 @@
import { CommandFactory } from 'nest-commander';
import { CLIModule } from './modules/CLI/CLI.module';
async function bootstrap() {
await CommandFactory.run(CLIModule);
}
bootstrap();
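The CLI entry point above delegates everything to nest-commander. As a rough sketch of what a command wired into CLIModule might look like (the class below is hypothetical; only the tenants:list command name is confirmed by the package scripts), a command is an injectable provider that extends CommandRunner:
import { Command, CommandRunner } from 'nest-commander';
// Hypothetical sketch: the real command classes live inside CLIModule.
@Command({ name: 'tenants:list', description: 'List all registered tenants.' })
export class TenantsListCommand extends CommandRunner {
  async run(): Promise<void> {
    // A real implementation would query the system database
    // and print each tenant's organization id.
    console.log('tenants:list invoked');
  }
}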

View File

@@ -0,0 +1,5 @@
import { registerAs } from '@nestjs/config';
export default registerAs('app', () => ({
baseUrl: process.env.BASE_URL,
}));

View File

@@ -0,0 +1,7 @@
import { registerAs } from '@nestjs/config';
export default registerAs('bankfeed', () => ({
enabled:
process.env.BANK_FEED_ENABLED === 'true' ||
process.env.BANK_FEED_ENABLED === 'yes',
}));

View File

@@ -0,0 +1,8 @@
import { registerAs } from '@nestjs/config';
import { parseBoolean } from '@/utils/parse-boolean';
export default registerAs('bullBoard', () => ({
enabled: parseBoolean<boolean>(process.env.BULL_BOARD_ENABLED, false),
username: process.env.BULL_BOARD_USERNAME,
password: process.env.BULL_BOARD_PASSWORD,
}));

View File

@@ -0,0 +1,5 @@
import { registerAs } from '@nestjs/config';
export default registerAs('cloud', () => ({
hostedOnCloud: process.env.HOSTED_ON_BIGCAPITAL_CLOUD === 'true',
}));

View File

@@ -1,3 +1,4 @@
import app from './app';
import systemDatabase from './system-database';
import tenantDatabase from './tenant-database';
import signup from './signup';
@@ -13,9 +14,17 @@ import signupRestrictions from './signup-restrictions';
import jwt from './jwt';
import mail from './mail';
import loops from './loops';
import bankfeed from './bankfeed';
import throttle from './throttle';
import cloud from './cloud';
import redis from './redis';
import queue from './queue';
import bullBoard from './bull-board';
export const config = [
app,
systemDatabase,
cloud,
tenantDatabase,
signup,
gotenberg,
@@ -29,5 +38,10 @@ export const config = [
signupRestrictions,
jwt,
mail,
loops
loops,
bankfeed,
throttle,
redis,
queue,
bullBoard,
];

View File

@@ -0,0 +1,6 @@
import { registerAs } from '@nestjs/config';
export default registerAs('queue', () => ({
host: process.env.QUEUE_HOST || 'localhost',
port: parseInt(process.env.QUEUE_PORT, 10) || 6379,
}));
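A sketch of consuming this namespace when registering BullMQ (the module wiring is assumed, not taken from the diff; @nestjs/bullmq is listed in the server package.json above):
import { Module } from '@nestjs/common';
import { ConfigModule, ConfigService } from '@nestjs/config';
import { BullModule } from '@nestjs/bullmq';

@Module({
  imports: [
    BullModule.forRootAsync({
      imports: [ConfigModule],
      inject: [ConfigService],
      useFactory: (config: ConfigService) => ({
        connection: {
          host: config.get<string>('queue.host'),
          port: config.get<number>('queue.port'),
        },
      }),
    }),
  ],
})
class AppQueueModule {}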

View File

@@ -1,5 +1,5 @@
import { parseBoolean } from '@/utils/parse-boolean';
import { registerAs } from '@nestjs/config';
import { parseBoolean } from '@/utils/parse-boolean';
export default registerAs('signupConfirmation', () => ({
enabled: parseBoolean<boolean>(process.env.SIGNUP_EMAIL_CONFIRMATION, false),

View File

@@ -7,4 +7,6 @@ export default registerAs('systemDatabase', () => ({
user: process.env.SYSTEM_DB_USER || process.env.DB_USER,
password: process.env.SYSTEM_DB_PASSWORD || process.env.DB_PASSWORD,
databaseName: process.env.SYSTEM_DB_NAME || process.env.DB_NAME,
migrationDir: process.env.SYSTEM_DB_MIGRATION_DIR || './src/database/system/migrations',
seedsDir: process.env.SYSTEM_DB_SEEDS_DIR || './src/database/system/seeds',
}));

View File

@@ -8,6 +8,6 @@ export default registerAs('tenantDatabase', () => ({
user: process.env.TENANT_DB_USER || process.env.DB_USER,
password: process.env.TENANT_DB_PASSWORD || process.env.DB_PASSWORD,
dbNamePrefix: process.env.TENANT_DB_NAME_PERFIX || 'bigcapital_tenant_',
migrationsDir: path.join(__dirname, '../../database/migrations'),
seedsDir: path.join(__dirname, '../../database/seeds/core'),
migrationsDir: path.join(__dirname, '../../database/tenant/migrations'),
seedsDir: path.join(__dirname, '../../database/tenant/seeds/core'),
}));

View File

@@ -0,0 +1,14 @@
import { registerAs } from '@nestjs/config';
export default registerAs('throttle', () => ({
global: {
ttl: parseInt(process.env.THROTTLE_GLOBAL_TTL ?? '60000', 10),
limit: parseInt(process.env.THROTTLE_GLOBAL_LIMIT ?? '100', 10),
},
auth: {
ttl: parseInt(process.env.THROTTLE_AUTH_TTL ?? '60000', 10),
limit: parseInt(process.env.THROTTLE_AUTH_LIMIT ?? '10', 10),
},
}));
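A sketch of how this config section could feed @nestjs/throttler v6 (the module wiring is assumed; the config keys match the registerAs namespace above):
import { Module } from '@nestjs/common';
import { ConfigModule, ConfigService } from '@nestjs/config';
import { ThrottlerModule } from '@nestjs/throttler';

@Module({
  imports: [
    ThrottlerModule.forRootAsync({
      imports: [ConfigModule],
      inject: [ConfigService],
      useFactory: (config: ConfigService) => ({
        throttlers: [
          {
            ttl: config.get<number>('throttle.global.ttl'),
            limit: config.get<number>('throttle.global.limit'),
          },
        ],
      }),
    }),
  ],
})
class AppThrottleModule {}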

View File

@@ -0,0 +1,20 @@
import { applyDecorators } from '@nestjs/common';
import { ApiHeader } from '@nestjs/swagger';
export function ApiCommonHeaders() {
return applyDecorators(
ApiHeader({
name: 'Authorization',
description:
"Value must be 'Bearer <token>' where <token> is an API key prefixed with 'bc_' or a JWT token.",
schema: { type: 'string', example: 'Bearer bc_1234567890abcdef' },
required: true,
}),
ApiHeader({
name: 'organization-id',
description:
'Required if Authorization is a JWT token. The organization ID to operate within.',
required: true,
}),
);
}
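A minimal usage sketch (the controller and import path below are assumed, not part of the diff): applying ApiCommonHeaders once at the controller level documents both headers on every route it contains.
import { Controller, Get } from '@nestjs/common';
import { ApiCommonHeaders } from './ApiCommonHeaders.decorator'; // assumed path

@Controller('items')
@ApiCommonHeaders()
export class ItemsController {
  @Get()
  findAll() {
    return [];
  }
}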

View File

@@ -0,0 +1,57 @@
/**
* Map to store all models that have been marked to prevent base currency mutation.
* Key is the model name, value is the model class.
*/
export const preventMutateBaseCurrencyModels = new Map<string, any>();
/**
* Decorator that marks an ORM model to prevent base currency mutation.
* When applied to a model class, it adds a static property `preventMutateBaseCurrency` set to true
* and registers the model in the preventMutateBaseCurrencyModels map.
*
* @returns {ClassDecorator} A decorator function that can be applied to a class.
*/
export function PreventMutateBaseCurrency(): ClassDecorator {
return (target: any) => {
// Set the static property on the model class
target.preventMutateBaseCurrency = true;
// Register the model in the map
const modelName = target.name;
preventMutateBaseCurrencyModels.set(modelName, target);
// Return the modified class
return target;
};
}
/**
* Get all registered models that prevent base currency mutation.
*
* @returns {Map<string, any>} Map of model names to model classes
*/
export function getPreventMutateBaseCurrencyModels(): Map<string, any> {
return preventMutateBaseCurrencyModels;
}
/**
* Check if a model is registered to prevent base currency mutation.
*
* @param {string} modelName - The name of the model to check
* @returns {boolean} True if the model is registered, false otherwise
*/
export function isModelPreventMutateBaseCurrency(modelName: string): boolean {
return preventMutateBaseCurrencyModels.has(modelName);
}
/**
* Get a specific model by name that prevents base currency mutation.
*
* @param {string} modelName - The name of the model to retrieve
* @returns {any | undefined} The model class if found, undefined otherwise
*/
export function getPreventMutateBaseCurrencyModel(
modelName: string,
): any | undefined {
return preventMutateBaseCurrencyModels.get(modelName);
}
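A usage sketch, assuming an Objection model class (the SaleInvoice model and import path here are illustrative, not the real ones):
import { Model } from 'objection';
import {
  PreventMutateBaseCurrency,
  isModelPreventMutateBaseCurrency,
} from './PreventMutateBaseCurrency.decorator'; // assumed path

@PreventMutateBaseCurrency()
class SaleInvoice extends Model {
  static get tableName() {
    return 'sales_invoices';
  }
}

// Guard code can now consult the registry before allowing a
// base currency change on the organization.
isModelPreventMutateBaseCurrency('SaleInvoice'); // => true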

View File

@@ -0,0 +1,32 @@
import { Transform } from 'class-transformer';
import { ValidateIf, ValidationOptions } from 'class-validator';
/**
* Decorator that converts the property value to a number.
* @returns PropertyDecorator
*/
export function ToNumber() {
return Transform(({ value, key }) => {
const defaultValue = null;
if (typeof value === 'number') {
return value;
}
// If value is an empty string, null, or undefined, normalize it to null (which won't pass validation)
if (value === '' || value === null || value === undefined) {
return defaultValue;
}
const parsed = Number(value);
return !isNaN(parsed) ? parsed : value;
});
}
/**
* Validates if the property is not empty.
* @returns PropertyDecorator
*/
export function IsOptional(validationOptions?: ValidationOptions) {
return ValidateIf((_obj, value) => {
return value !== null && value !== undefined && value !== '';
}, validationOptions);
}
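A sketch of the two decorators combined on a query DTO (the DTO and import path are assumed). ToNumber coerces '25' to 25 and normalizes '' to null, which the relaxed IsOptional then treats as absent, so empty query parameters neither fail validation nor reach the handler as strings:
import { IsInt, Min } from 'class-validator';
import { ToNumber, IsOptional } from './validate.decorator'; // assumed path

class PaginationQueryDto {
  @IsOptional()
  @ToNumber()
  @IsInt()
  @Min(1)
  page?: number; // '25' -> 25, '' -> null (skipped by IsOptional)
}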

View File

@@ -0,0 +1,55 @@
import { IsArray, IsInt, ArrayNotEmpty, IsBoolean, IsOptional } from 'class-validator';
import { ApiProperty, ApiPropertyOptional } from '@nestjs/swagger';
import { Transform } from 'class-transformer';
import { parseBoolean } from '@/utils/parse-boolean';
export class BulkDeleteDto {
@IsArray()
@ArrayNotEmpty()
@IsInt({ each: true })
@ApiProperty({
description: 'Array of IDs to delete',
type: [Number],
example: [1, 2, 3],
})
ids: number[];
@IsOptional()
@IsBoolean()
@Transform(({ value, obj }) => parseBoolean(value ?? obj?.skip_undeletable, false))
@ApiPropertyOptional({
description: 'When true, undeletable items will be skipped and only deletable ones will be removed.',
type: Boolean,
default: false,
})
skipUndeletable?: boolean;
}
export class ValidateBulkDeleteResponseDto {
@ApiProperty({
description: 'Number of items that can be deleted',
example: 2,
})
deletableCount: number;
@ApiProperty({
description: 'Number of items that cannot be deleted',
example: 1,
})
nonDeletableCount: number;
@ApiProperty({
description: 'IDs of items that can be deleted',
type: [Number],
example: [1, 2],
})
deletableIds: number[];
@ApiProperty({
description: 'IDs of items that cannot be deleted',
type: [Number],
example: [3],
})
nonDeletableIds: number[];
}
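A controller sketch tying the two DTOs together (the route, controller, and stubbed response are hypothetical; a real handler would delegate to a service that checks for blocking relations):
import { Body, Controller, Post } from '@nestjs/common';
import { ApiResponse } from '@nestjs/swagger';
import { BulkDeleteDto, ValidateBulkDeleteResponseDto } from './BulkDelete.dto'; // assumed path

@Controller('items')
class ItemsBulkController {
  @Post('bulk-delete/validate')
  @ApiResponse({ status: 200, type: ValidateBulkDeleteResponseDto })
  validateBulkDelete(@Body() dto: BulkDeleteDto): ValidateBulkDeleteResponseDto {
    // Stub: treat every id as deletable.
    return {
      deletableCount: dto.ids.length,
      nonDeletableCount: 0,
      deletableIds: dto.ids,
      nonDeletableIds: [],
    };
  }
}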

View File

@@ -0,0 +1,33 @@
import { ApiProperty } from '@nestjs/swagger';
class Pagination {
@ApiProperty({
description: 'Total number of items across all pages',
example: 100,
})
total: number;
@ApiProperty({
description: 'Current page number (1-based)',
example: 1,
minimum: 1,
})
page: number;
@ApiProperty({
description: 'Number of items per page',
example: 10,
minimum: 1,
})
pageSize: number;
}
export class PaginatedResponseDto<TData> {
@ApiProperty({
description: 'Pagination metadata',
type: Pagination,
})
pagination: Pagination;
data: TData[];
}
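Note that data is left undecorated on purpose: @nestjs/swagger cannot emit generic type parameters, so a concrete subclass typically redeclares it with the element type. A sketch (ItemDto and the import path are illustrative):
import { ApiProperty } from '@nestjs/swagger';
import { PaginatedResponseDto } from './PaginatedResults.dto'; // assumed path

class ItemDto {
  @ApiProperty({ example: 1 })
  id: number;
}

class PaginatedItemsResponseDto extends PaginatedResponseDto<ItemDto> {
  @ApiProperty({ description: 'Page of items', type: [ItemDto] })
  data: ItemDto[];
}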

View File

@@ -0,0 +1,9 @@
export class ModelHasRelationsError extends Error {
type: string;
constructor(type: string = 'ModelHasRelations', message?: string) {
message = message || `Entity has relations`;
super(message);
this.type = type;
}
}

View File

@@ -0,0 +1,27 @@
import {
ExceptionFilter,
Catch,
ArgumentsHost,
HttpStatus,
} from '@nestjs/common';
import { Response } from 'express';
import { ModelHasRelationsError } from '../exceptions/ModelHasRelations.exception';
@Catch(ModelHasRelationsError)
export class ModelHasRelationsFilter implements ExceptionFilter {
catch(exception: ModelHasRelationsError, host: ArgumentsHost) {
const ctx = host.switchToHttp();
const response = ctx.getResponse<Response>();
const status = HttpStatus.CONFLICT;
response.status(status).json({
errors: [
{
statusCode: status,
type: exception.type || 'MODEL_HAS_RELATIONS',
message: exception.message,
},
],
});
}
}
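A usage sketch (the controller and the error type string are hypothetical): any service throwing ModelHasRelationsError under a handler covered by the filter yields a 409 with the errors envelope shown above.
import { Controller, Delete, Param, UseFilters } from '@nestjs/common';
import { ModelHasRelationsFilter } from '../filters/ModelHasRelations.filter'; // assumed path
import { ModelHasRelationsError } from '../exceptions/ModelHasRelations.exception';

@Controller('accounts')
@UseFilters(ModelHasRelationsFilter)
class AccountsController {
  @Delete(':id')
  remove(@Param('id') id: string) {
    // A real service would throw only when the row is still referenced.
    throw new ModelHasRelationsError('ACCOUNT_HAS_RELATIONS');
  }
}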

View File

@@ -7,53 +7,46 @@ import {
} from '@nestjs/common';
import { type Observable } from 'rxjs';
import { map } from 'rxjs/operators';
import { mapKeysDeep } from '@/utils/deepdash';
export function camelToSnake<T = any>(value: T) {
export function camelToSnake<T = any>(value: T): T {
if (value === null || value === undefined) {
return value;
}
if (Array.isArray(value)) {
return value.map(camelToSnake);
}
if (typeof value === 'object' && !(value instanceof Date)) {
return Object.fromEntries(
Object.entries(value).map(([key, value]) => [
key
.split(/(?=[A-Z])/)
.join('_')
.toLowerCase(),
camelToSnake(value),
]),
);
}
return value;
return mapKeysDeep(
value,
(_value: string, key: any, parent: any, context: any) => {
if (Array.isArray(parent)) {
// tell mapKeysDeep to skip mapping inside this branch
context.skipChildren = true;
return key;
}
return key
.split(/(?=[A-Z])/)
.join('_')
.toLowerCase();
},
) as T;
}
export function snakeToCamel<T = any>(value: T) {
export function snakeToCamel<T = any>(value: T): T {
if (value === null || value === undefined) {
return value;
}
if (Array.isArray(value)) {
return value.map(snakeToCamel);
}
const impl = (str: string) => {
const converted = str.replace(/([-_]\w)/g, (group) =>
group[1].toUpperCase(),
);
return converted[0].toLowerCase() + converted.slice(1);
};
if (typeof value === 'object' && !(value instanceof Date)) {
return Object.fromEntries(
Object.entries(value).map(([key, value]) => [
impl(key),
snakeToCamel(value),
]),
);
}
return value;
return mapKeysDeep(
value,
(_value: string, key: any, parent: any, context: any) => {
if (Array.isArray(parent)) {
// tell mapKeysDeep to skip mapping inside this branch
context.skipChildren = true;
return key;
}
const converted = key.replace(/([-_]\w)/g, (group) =>
group[1].toUpperCase(),
);
return converted[0].toLowerCase() + converted.slice(1);
},
) as T;
}
export const DEFAULT_STRATEGY = {
@@ -63,14 +56,17 @@ export const DEFAULT_STRATEGY = {
@Injectable()
export class SerializeInterceptor implements NestInterceptor<any, any> {
constructor(@Optional() readonly strategy = DEFAULT_STRATEGY) {}
constructor(@Optional() readonly strategy = DEFAULT_STRATEGY) { }
intercept(
context: ExecutionContext,
next: CallHandler<any>,
): Observable<any> {
const request = context.switchToHttp().getRequest();
// Transform both body and query parameters
request.body = this.strategy.in(request.body);
request.query = this.strategy.in(request.query);
// next.handle() returns the response stream; map the outgoing payload.
return next.handle().pipe(map(this.strategy.out));

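For reference, the conversion the two helpers perform on plain objects (per the skipChildren branch above, keys nested under array branches are deliberately left untouched):
camelToSnake({ dueAmount: 100, customerId: 5 });
// => { due_amount: 100, customer_id: 5 }

snakeToCamel({ due_amount: 100, customer_id: 5 });
// => { dueAmount: 100, customerId: 5 }
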
View File

@@ -5,15 +5,18 @@ import {
NestInterceptor,
} from '@nestjs/common';
import { Observable, map } from 'rxjs';
import { mapValues, mapValuesDeep } from '@/utils/deepdash';
import { mapValuesDeep } from '@/utils/deepdash';
@Injectable()
export class ToJsonInterceptor implements NestInterceptor {
intercept(context: ExecutionContext, next: CallHandler): Observable<any> {
return next.handle().pipe(
map((data) => {
if (data === null || data === undefined) {
return data;
}
return mapValuesDeep(data, (value) => {
if (value && typeof value.toJSON === 'function') {
if (value !== null && value !== undefined && typeof value.toJSON === 'function') {
return value.toJSON();
}
return value;

View File

@@ -14,12 +14,15 @@ export class ValidationPipe implements PipeTransform<any> {
return value;
}
const object = plainToInstance(metatype, value);
const errors = await validate(object);
const errors = await validate(object, {
// Strip validated object of any properties that do not have any decorators.
whitelist: true,
});
if (errors.length > 0) {
throw new BadRequestException(errors);
}
return value;
return object;
}
private toValidate(metatype: Function): boolean {

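The combined effect of whitelist: true and returning object instead of value is that handlers now receive a class instance with undecorated properties stripped. A sketch (the DTO is illustrative):
import { IsString } from 'class-validator';

class CreateItemDto {
  @IsString()
  name: string;
}

// Incoming body: { "name": "Chair", "unexpected": true }
// After the pipe: a CreateItemDto instance { name: 'Chair' } —
// 'unexpected' carries no decorator, so validate(..., { whitelist: true })
// strips it before the handler runs.
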
View File

@@ -0,0 +1,7 @@
import { QueryBuilder, Model } from 'objection';
declare module 'objection' {
interface QueryBuilder<M extends Model, R = M[]> {
deleteIfNoRelations(this: QueryBuilder<M, R>, ...args: any[]): Promise<any>;
}
}

View File

@@ -1,4 +1,27 @@
// import { getTransactionsLockingSettingsSchema } from '@/api/controllers/TransactionsLocking/utils';
import { chain, mapKeys } from 'lodash';
const getTransactionsLockingSettingsSchema = (modules: string[]) => {
const moduleSchema = {
active: { type: 'boolean' },
lock_to_date: { type: 'date' },
unlock_from_date: { type: 'date' },
unlock_to_date: { type: 'date' },
lock_reason: { type: 'string' },
unlock_reason: { type: 'string' },
};
return chain(modules)
.map((module: string) => {
return mapKeys(moduleSchema, (value, key: string) => `${module}.${key}`);
})
.flattenDeep()
.reduce((result, value) => {
return {
...result,
...value,
};
}, {})
.value();
};
export const SettingsOptions = {
organization: {
@@ -223,12 +246,12 @@ export const SettingsOptions = {
'locking-type': {
type: 'string',
},
// ...getTransactionsLockingSettingsSchema([
// 'all',
// 'sales',
// 'purchases',
// 'financial',
// ]),
...getTransactionsLockingSettingsSchema([
'all',
'sales',
'purchases',
'financial',
]),
},
features: {
'multi-warehouses': {

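For clarity, expanding the restored helper for a single module yields (computed from the schema above):
getTransactionsLockingSettingsSchema(['sales']);
// => {
//   'sales.active': { type: 'boolean' },
//   'sales.lock_to_date': { type: 'date' },
//   'sales.unlock_from_date': { type: 'date' },
//   'sales.unlock_to_date': { type: 'date' },
//   'sales.lock_reason': { type: 'string' },
//   'sales.unlock_reason': { type: 'string' },
// }
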
View File

@@ -0,0 +1,9 @@
exports.up = (knex) => knex.schema.createTable('password_resets', (table) => {
table.increments();
table.string('email').index();
table.string('token').index();
table.timestamp('created_at');
});
exports.down = (knex) => knex.schema.dropTableIfExists('password_resets');

View File

@@ -0,0 +1,22 @@
exports.up = function(knex) {
return knex.schema.createTable('tenants', (table) => {
table.bigIncrements();
table.string('organization_id').index();
table.dateTime('under_maintenance_since').nullable();
table.dateTime('initialized_at').nullable();
table.dateTime('seeded_at').nullable();
table.dateTime('built_at').nullable();
table.string('build_job_id');
table.integer('database_batch');
table.string('upgrade_job_id');
table.timestamps();
});
};
exports.down = function(knex) {
return knex.schema.dropTableIfExists('tenants');
};

View File

@@ -0,0 +1,26 @@
exports.up = (knex) => {
return knex.schema.createTable('users', (table) => {
table.increments();
table.string('first_name');
table.string('last_name');
table.string('email').index();
table.string('phone_number').index();
table.string('password');
table.boolean('active').index();
table.string('language');
table
.bigInteger('tenant_id')
.unsigned()
.index()
.references('id')
.inTable('tenants');
table.dateTime('invite_accepted_at').index();
table.dateTime('last_login_at').index();
table.dateTime('deleted_at').index();
table.timestamps();
});
};
exports.down = (knex) => {
return knex.schema.dropTableIfExists('users');
};

View File

@@ -0,0 +1,15 @@
exports.up = function(knex) {
return knex.schema.createTable('user_invites', (table) => {
table.increments();
table.string('email').index();
table.string('token').unique().index();
table.bigInteger('tenant_id').unsigned().index().references('id').inTable('tenants');
table.integer('user_id').unsigned().index().references('id').inTable('users');
table.datetime('created_at');
});
};
exports.down = function(knex) {
return knex.schema.dropTableIfExists('user_invites');
};

View File

@@ -0,0 +1,22 @@
exports.up = function(knex) {
return knex.schema.createTable('subscriptions_plans', table => {
table.increments();
table.string('name');
table.string('description');
table.decimal('price');
table.string('currency', 3);
table.integer('trial_period');
table.string('trial_interval');
table.integer('invoice_period');
table.string('invoice_interval');
table.timestamps();
});
};
exports.down = function(knex) {
return knex.schema.dropTableIfExists('subscriptions_plans')
};

View File

@@ -0,0 +1,30 @@
exports.up = function(knex) {
return knex.schema.createTable('subscription_plans', table => {
table.increments();
table.string('slug');
table.string('name');
table.string('desc');
table.boolean('active');
table.decimal('price').unsigned();
table.string('currency', 3);
table.decimal('trial_period').nullable();
table.string('trial_interval').nullable();
table.decimal('invoice_period').nullable();
table.string('invoice_interval').nullable();
table.integer('index').unsigned();
table.timestamps();
}).then(() => {
return knex.seed.run({
specific: 'seed_subscriptions_plans.js',
});
});
};
exports.down = function(knex) {
return knex.schema.dropTableIfExists('subscription_plans')
};

View File

@@ -0,0 +1,22 @@
exports.up = function(knex) {
return knex.schema.createTable('subscription_plan_subscriptions', table => {
table.increments('id');
table.string('slug');
table.integer('plan_id').unsigned().index().references('id').inTable('subscription_plans');
table.bigInteger('tenant_id').unsigned().index().references('id').inTable('tenants');
table.dateTime('starts_at').nullable();
table.dateTime('ends_at').nullable();
table.dateTime('cancels_at').nullable();
table.dateTime('canceled_at').nullable();
table.timestamps();
});
};
exports.down = function(knex) {
return knex.schema.dropTableIfExists('subscription_plan_subscriptions');
};

View File

@@ -0,0 +1,22 @@
exports.up = function (knex) {
return knex.schema.createTable('tenants_metadata', (table) => {
table.bigIncrements();
table.integer('tenant_id').unsigned();
table.string('name');
table.string('industry');
table.string('location');
table.string('base_currency');
table.string('language');
table.string('timezone');
table.string('date_format');
table.string('fiscal_year');
});
};
exports.down = function (knex) {
return knex.schema.dropTableIfExists('tenants_metadata');
};

View File

@@ -0,0 +1,11 @@
exports.up = function (knex) {
return knex.schema.table('tenants_metadata', (table) => {
table.string('tax_number')
});
};
exports.down = function (knex) {
return knex.schema.table('tenants_metadata', (table) => {
table.dropColumn('tax_number');
});
};

View File

@@ -0,0 +1,22 @@
exports.up = function (knex) {
return knex.schema.createTable('imports', (table) => {
table.increments();
table.string('filename');
table.string('import_id');
table.string('resource');
table.json('columns');
table.json('mapping');
table.json('params');
table
.bigInteger('tenant_id')
.unsigned()
.index()
.references('id')
.inTable('tenants');
table.timestamps();
});
};
exports.down = function (knex) {
return knex.schema.dropTableIfExists('imports');
};

View File

@@ -0,0 +1,15 @@
exports.up = function (knex) {
return knex.schema.createTable('plaid_items', (table) => {
table.bigIncrements('id');
table
.bigInteger('tenant_id')
.unsigned()
.index()
.references('id')
.inTable('tenants');
table.string('plaid_item_id');
table.timestamps();
});
};
exports.down = (knex) => {};

View File

@@ -0,0 +1,7 @@
exports.up = function (knex) {
return knex.seed.run({
specific: 'seed_tenants_free_subscription.js',
});
};
exports.down = function (knex) {};

View File

@@ -0,0 +1,12 @@
exports.up = function (knex) {
return knex.schema
.table('users', (table) => {
table.string('verify_token');
table.boolean('verified').defaultTo(false);
})
.then(() => {
return knex('users').update({ verified: true });
});
};
exports.down = (knex) => {};

View File

@@ -0,0 +1,11 @@
exports.up = function (knex) {
return knex.schema.table('subscription_plans', (table) => {
table.string('lemon_variant_id').nullable().index();
});
};
exports.down = (knex) => {
return knex.schema.table('subscription_plans', (table) => {
table.dropColumn('lemon_variant_id');
});
};

View File

@@ -0,0 +1,96 @@
exports.up = function (knex) {
return knex('subscription_plans').insert([
// Capital Basic
{
name: 'Capital Basic (Monthly)',
slug: 'capital-basic-monthly',
price: 10,
active: true,
currency: 'USD',
invoice_period: 1,
invoice_interval: 'month',
lemon_variant_id: '446152',
// lemon_variant_id: '450016',
},
{
name: 'Capital Basic (Annually)',
slug: 'capital-basic-annually',
price: 90,
active: true,
currency: 'USD',
invoice_period: 1,
invoice_interval: 'year',
lemon_variant_id: '446153',
// lemon_variant_id: '450018',
},
// Capital Essential
{
name: 'Capital Essential (Monthly)',
slug: 'capital-essential-monthly',
price: 20,
active: true,
currency: 'USD',
invoice_period: 1,
invoice_interval: 'month',
lemon_variant_id: '446155',
// lemon_variant_id: '450028',
},
{
name: 'Capital Essential (Annually)',
slug: 'capital-essential-annually',
price: 180,
active: true,
invoice_period: 1,
invoice_interval: 'year',
lemon_variant_id: '446156',
// lemon_variant_id: '450029',
},
// Capital Plus
{
name: 'Capital Plus (Monthly)',
slug: 'capital-plus-monthly',
price: 25,
active: true,
invoice_period: 1,
invoice_interval: 'month',
lemon_variant_id: '446165',
// lemon_variant_id: '450031',
},
{
name: 'Capital Plus (Annually)',
slug: 'capital-plus-annually',
price: 228,
active: true,
invoice_period: 1,
invoice_interval: 'year',
lemon_variant_id: '446164',
// lemon_variant_id: '450032',
},
// Capital Big
{
name: 'Capital Big (Monthly)',
slug: 'capital-big-monthly',
price: 40,
active: true,
invoice_period: 1,
invoice_interval: 'month',
lemon_variant_id: '446167',
// lemon_variant_id: '450024',
},
{
name: 'Capital Big (Annually)',
slug: 'capital-big-annually',
price: 360,
active: true,
invoice_period: 1,
invoice_interval: 'year',
lemon_variant_id: '446168',
// lemon_variant_id: '450025',
},
]);
};
exports.down = function (knex) {};

View File

@@ -0,0 +1,11 @@
exports.up = function (knex) {
return knex.schema.table('subscription_plan_subscriptions', (table) => {
table.string('lemon_subscription_id').nullable();
});
};
exports.down = function (knex) {
return knex.schema.table('subscription_plan_subscriptions', (table) => {
table.dropColumn('lemon_subscription_id');
});
};

View File

@@ -0,0 +1,13 @@
exports.up = function (knex) {
return knex.schema.table('subscription_plan_subscriptions', (table) => {
table.dateTime('trial_ends_at').nullable();
table.dropColumn('cancels_at');
});
};
exports.down = function (knex) {
return knex.schema.table('subscription_plan_subscriptions', (table) => {
table.dropColumn('trial_ends_at');
table.dateTime('cancels_at').nullable();
});
};

View File

@@ -0,0 +1,21 @@
/**
* @param { import("knex").Knex } knex
* @returns { Promise<void> }
*/
exports.up = function (knex) {
return knex.schema.createTable('oneclick_demos', (table) => {
table.increments('id');
table.string('key');
table.integer('tenant_id').unsigned();
table.integer('user_id').unsigned();
table.timestamps();
});
};
/**
* @param { import("knex").Knex } knex
* @returns { Promise<void> }
*/
exports.down = function (knex) {
return knex.schema.dropTableIfExists('oneclick_demos');
};

View File

@@ -0,0 +1,19 @@
/**
* @param { import("knex").Knex } knex
* @returns { Promise<void> }
*/
exports.up = function (knex) {
return knex.schema.table('subscription_plan_subscriptions', (table) => {
table.string('payment_status');
});
};
/**
* @param { import("knex").Knex } knex
* @returns { Promise<void> }
*/
exports.down = function (knex) {
return knex.schema.table('subscription_plan_subscriptions', (table) => {
table.dropColumn('payment_status');
});
};

View File

@@ -0,0 +1,20 @@
/**
* @param { import("knex").Knex } knex
* @returns { Promise<void> }
*/
exports.up = function (knex) {
return knex.schema.createTable('stripe_accounts', (table) => {
table.increments('id').primary();
table.string('stripe_account_id').notNullable();
table.string('tenant_id').notNullable();
table.timestamps(true, true); // Adds created_at and updated_at columns
});
};
/**
* @param { import("knex").Knex } knex
* @returns { Promise<void> }
*/
exports.down = function (knex) {
return knex.schema.dropTableIfExists('stripe_accounts');
};

View File

@@ -0,0 +1,24 @@
/**
* @param { import("knex").Knex } knex
* @returns { Promise<void> }
*/
exports.up = function (knex) {
return knex.schema.createTable('payment_links', (table) => {
table.increments('id');
table.integer('tenant_id');
table.integer('resource_id');
table.text('resource_type');
table.string('linkId');
table.string('publicity');
table.datetime('expiry_at');
table.timestamps();
});
};
/**
* @param { import("knex").Knex } knex
* @returns { Promise<void> }
*/
exports.down = function (knex) {
return knex.schema.dropTableIfExists('payment_links');
};

View File

@@ -0,0 +1,23 @@
/**
* @param { import("knex").Knex } knex
* @returns { Promise<void> }
*/
exports.up = function (knex) {
return knex.schema.table('tenants_metadata', (table) => {
table.string('primary_color');
table.string('logo_key');
table.json('address');
});
};
/**
* @param { import("knex").Knex } knex
* @returns { Promise<void> }
*/
exports.down = function (knex) {
return knex.schema.table('tenants_metadata', (table) => {
table.dropColumn('primary_color');
table.dropColumn('logo_key');
table.dropColumn('address');
});
};

View File

@@ -0,0 +1,36 @@
/**
* @param { import("knex").Knex } knex
* @returns { Promise<void> }
*/
exports.up = function (knex) {
return knex.schema.createTable('api_keys', (table) => {
table.increments();
table.string('key').notNullable().unique().index();
table.string('name');
table
.integer('user_id')
.unsigned()
.notNullable()
.index()
.references('id')
.inTable('users');
table
.bigInteger('tenant_id')
.unsigned()
.notNullable()
.index()
.references('id')
.inTable('tenants');
table.dateTime('expires_at').nullable().index();
table.dateTime('revoked_at').nullable().index();
table.timestamps();
});
};
/**
* @param { import("knex").Knex } knex
* @returns { Promise<void> }
*/
exports.down = function (knex) {
return knex.schema.dropTableIfExists('api_keys');
};

View File

@@ -0,0 +1,26 @@
exports.seed = (knex) => {
// Deletes ALL existing entries
return knex('subscription_plans')
.del()
.then(() => {
// Inserts seed entries
return knex('subscription_plans').insert([
{
name: 'Free',
slug: 'free',
price: 0,
active: true,
currency: 'USD',
},
{
name: 'Early Adaptor',
slug: 'early-adaptor',
price: 29,
active: true,
currency: 'USD',
invoice_period: 12,
invoice_interval: 'month',
},
]);
});
};

View File

@@ -0,0 +1,26 @@
exports.seed = (knex) => {
// Assigns the free plan to every tenant that has no subscription yet
return knex('subscription_plan_subscriptions')
.then(async () => {
const tenants = await knex('tenants');
for (const tenant of tenants) {
const existingSubscription = await knex('subscription_plan_subscriptions')
.where('tenantId', tenant.id)
.first();
if (!existingSubscription) {
const freePlan = await knex('subscription_plans').where('slug', 'free').first();
await knex('subscription_plan_subscriptions').insert({
tenantId: tenant.id,
planId: freePlan.id,
slug: 'main',
startsAt: knex.fn.now(),
endsAt: null,
createdAt: knex.fn.now(),
});
}
}
});
};

Some files were not shown because too many files have changed in this diff.