docs(core): parallelization and distribution guide (#20435)

This commit is contained in:
Isaac Mann 2023-12-04 02:17:49 -05:00 committed by GitHub
parent 00f77ea314
commit e8361e6771
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
13 changed files with 147 additions and 156 deletions

View File

@ -240,13 +240,13 @@
"tags": [] "tags": []
}, },
{ {
"id": "dte", "id": "parallelization-distribution",
"name": "Improve Worst Case CI Times", "name": "Parallelization and Distribution",
"description": "", "description": "",
"file": "shared/concepts/improve-worst-case-ci-times", "file": "nx-cloud/concepts/parallelization-distribution",
"itemList": [], "itemList": [],
"isExternal": false, "isExternal": false,
"path": "/ci/concepts/dte", "path": "/ci/concepts/parallelization-distribution",
"tags": ["distribute-task-execution"] "tags": ["distribute-task-execution"]
}, },
{ {
@ -274,14 +274,14 @@
"path": "/ci/concepts/reduce-waste", "path": "/ci/concepts/reduce-waste",
"tags": [] "tags": []
}, },
"/ci/concepts/dte": { "/ci/concepts/parallelization-distribution": {
"id": "dte", "id": "parallelization-distribution",
"name": "Improve Worst Case CI Times", "name": "Parallelization and Distribution",
"description": "", "description": "",
"file": "shared/concepts/improve-worst-case-ci-times", "file": "nx-cloud/concepts/parallelization-distribution",
"itemList": [], "itemList": [],
"isExternal": false, "isExternal": false,
"path": "/ci/concepts/dte", "path": "/ci/concepts/parallelization-distribution",
"tags": ["distribute-task-execution"] "tags": ["distribute-task-execution"]
}, },
"/ci/concepts/cache-security": { "/ci/concepts/cache-security": {

View File

@ -5714,9 +5714,9 @@
"disableCollapsible": false "disableCollapsible": false
}, },
{ {
"name": "Improve Worst Case CI Times", "name": "Parallelization and Distribution",
"path": "/ci/concepts/dte", "path": "/ci/concepts/parallelization-distribution",
"id": "dte", "id": "parallelization-distribution",
"isExternal": false, "isExternal": false,
"children": [], "children": [],
"disableCollapsible": false "disableCollapsible": false
@ -5741,9 +5741,9 @@
"disableCollapsible": false "disableCollapsible": false
}, },
{ {
"name": "Improve Worst Case CI Times", "name": "Parallelization and Distribution",
"path": "/ci/concepts/dte", "path": "/ci/concepts/parallelization-distribution",
"id": "dte", "id": "parallelization-distribution",
"isExternal": false, "isExternal": false,
"children": [], "children": [],
"disableCollapsible": false "disableCollapsible": false

View File

@ -728,10 +728,10 @@
}, },
{ {
"description": "", "description": "",
"file": "shared/concepts/improve-worst-case-ci-times", "file": "nx-cloud/concepts/parallelization-distribution",
"id": "dte", "id": "parallelization-distribution",
"name": "Improve Worst Case CI Times", "name": "Parallelization and Distribution",
"path": "/ci/concepts/dte" "path": "/ci/concepts/parallelization-distribution"
}, },
{ {
"description": "Learn how to set up Nx Cloud for your workspace.", "description": "Learn how to set up Nx Cloud for your workspace.",

View File

@ -1708,10 +1708,10 @@
"file": "nx-cloud/concepts/reduce-waste" "file": "nx-cloud/concepts/reduce-waste"
}, },
{ {
"name": "Improve Worst Case CI Times", "name": "Parallelization and Distribution",
"tags": ["distribute-task-execution"], "tags": ["distribute-task-execution"],
"id": "dte", "id": "parallelization-distribution",
"file": "shared/concepts/improve-worst-case-ci-times" "file": "nx-cloud/concepts/parallelization-distribution"
}, },
{ {
"name": "Cache Security", "name": "Cache Security",

View File

@ -0,0 +1,116 @@
# Parallelization and Distribution
Nx speeds up your CI in several ways. One method is to reduce wasted calculations with the [affected command](/ci/features/affected) and [remote caching](/ci/features/remote-cache). No matter how effective you are at eliminating wasted calculations in CI, there will always be some tasks that really do need to be executed and sometimes that list of tasks will be everything in the repository.
To speed up the essential tasks, Nx [efficiently orchestrates](/concepts/task-pipeline-configuration) the tasks so that prerequisite tasks are executed first, but independent tasks can all be executed concurrently. Running tasks concurrently can be done with parallel processes on the same machine or distributed across multiple machines.
## Parallelization
Any time you execute a task, Nx will parallelize as much as possible. If you run `nx build my-project`, Nx will build the dependencies of that project in parallel as much as possible. If you run `nx run-many -t build` or `nx affected -t build`, Nx will run all the specified tasks and their dependencies in parallel as much as possible.
Nx will limit itself to the maximum number of parallel processes set in the `parallel` property in `nx.json`. To set that limit to `2` for a specific command, you can specify `--parallel=2` in the terminal. This flag works for individual tasks as well as `run-many` and `affected`.
Unfortunately, there is a limit to how many processes a single computer can run in parallel at the same time. Once you hit that limit, you have to wait for all the tasks to complete.
#### Pros and Cons of Using a Single Machine to Execute Tasks on Parallel Processes:
| Characteristic | Pro/Con | Notes |
| -------------- | ------- | -------------------------------------------------------------------------------- |
| Complexity | 🎉 Pro | The pipeline uses the same commands a developer would use on their local machine |
| Debuggability | 🎉 Pro | All build artifacts and logs are on a single machine |
| Speed | ⛔️ Con | The larger a repository gets, the slower your CI will be |
## Distribution Across Machines
Once your repository grows large enough, it makes sense to start using multiple machines to execute tasks in CI. This adds some extra cost to run the extra machines, but the cost of running those machines is much less than the cost of paying developers to sit and wait for CI to finish.
You can either distribute tasks across machines manually, or use Nx Cloud distributed task execution to automatically assign tasks to machines and gather the results back to a single primary machine. When discussing distribution, we refer to the primary machine that determines which tasks to run as the main machine (or job). The machines that only execute the tasks assigned to them are called agent machines (or jobs).
### Manual Distribution
One way to manually distribute tasks is to use binning. Binning is a distribution strategy where there is a main job that divides the work into bins, one for each agent machine. Then every agent executes the work prepared for it. Here is a simplified version of the binning strategy.
```yaml {% fileName="main-job.yml" %}
# Get the list of affected projects
- nx show projects --affected --json > affected-projects.json
# Store the list of affected projects in a PROJECTS environment variable
# that is accessible by the agent jobs
- node storeAffectedProjects.js
```
```yaml {% fileName="lint-agent.yml" %}
# Run lint for all projects defined in PROJECTS
- nx run-many --projects=$PROJECTS -t lint
```
```yaml {% fileName="test-agent.yml" %}
# Run test for all projects defined in PROJECTS
- nx run-many --projects=$PROJECTS -t test
```
```yaml {% fileName="build-agent.yml" %}
# Run build for all projects defined in PROJECTS
- nx run-many --projects=$PROJECTS -t build
```
Here's a visualization of how this approach works:
![CI using binning](/shared/images/dte/binning.svg)
This is faster than the single machine approach, but you can see that there is still idle time where some agents have to wait for other agents to finish their tasks.
There's also a lot of complexity hidden in the idle time in the graph. If `test-agent` tries to run a `test` task that depends on a `build` task that hasn't been completed yet by the `build-agent`, the `test-agent` will start to run that `build` task without pulling it from the cache. Then the `build-agent` might start to run the same `build` task that the `test-agent` is already working on. Now you've reintroduced waste that remote caching was supposed to eliminate.
It is possible in a smaller repository to manually calculate the best order for tasks and encode that order in a script. But that order will need to be adjusted as the repository structure changes and may even be suboptimal depending on what projects were affected in a given PR.
#### Pros and Cons of Manually Distributing Tasks Across Multiple Machines:
| Characteristic | Pro/Con | Notes |
| -------------- | ------- | ------------------------------------------------------------------------------------------------------------------- |
| Complexity | ⛔️ Con | You need to write custom scripts to tell agent machines what tasks to execute. Those scripts need to be maintained. |
| Debuggability | ⛔️ Con | Build artifacts and logs are scattered across agent machines. |
| Speed | 🎉 Pro | Faster than using a single machine |
### Nx Cloud Distributed Task Execution
When you use Nx Cloud's distributed task execution you gain even more speed than manual distribution while preserving the simple setup and easy debuggability of the single machine scenario.
The setup looks like this:
```yaml {% fileName="main-job.yml" %}
# Coordinate the agents to run the tasks and stop agents when the build tasks are done
- npx nx-cloud start-ci-run --stop-agents-after=build
# Run any commands you want here
- nx affected -t lint,test,build
```
```yaml {% fileName="agent.yml" %}
# Wait for tasks to execute
- npx nx-cloud start-agent
```
The visualization for distributed task execution looks like this:
![CI using DTE](/shared/images/dte/3agents.svg)
In the same way that Nx efficiently assigns tasks to parallel processes on a single machine so that prerequisite tasks are executed first, Nx Cloud's distributed task execution efficiently assigns tasks to agent machines so that the idle time of each agent machine is kept to a minimum. Nx performs these calculations for each PR, so no matter which projects are affected or how your project structure changes, Nx will optimally assign tasks to the agents available.
As your repository grows and CI runs start to slow down, add another agent machine to your pipeline and Nx Cloud will use that extra capacity to handle the larger volume of tasks. If you would like Nx Cloud to automatically provision the agents for you, check out [Nx Agents](/ci/features/nx-agents).
#### Pros and Cons of Using Nx Cloud's Distributed Task Execution:
| Characteristic | Pro/Con | Notes |
| -------------- | ------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| Complexity | 🎉 Pro | The pipeline uses the same commands a developer would use on their local machine, but with one extra line before running tasks and a single line for each agent to execute. |
| Debuggability | 🎉 Pro | Build artifacts and logs are collated to the main machine as if all tasks were executed on that machine |
| Speed | 🎉 Pro | Fastest possible task distribution for each PR |
### Nx Cloud Concurrency Limits
As you scale your usage of Nx Cloud, you may run into concurrency limits. Nx Cloud puts a [limit on the number of CI machines](https://nx.app/pricing) in your workspace that are simultaneously connecting to Nx Cloud. This includes any machine running in CI - both the main CI pipeline machine and any agent machines.
The Free plan offers 30 concurrent connections, the Startup plan offers 50 concurrent connections, the Pro plan offers 70 concurrent connections and the Enterprise plan has no limit on concurrent connections. If each pipeline uses 9 agents in addition to the main job, that makes 10 concurrent connections for each PR. This would mean that on a Pro plan, you can have a maximum of 7 PRs running in CI simultaneously. If an eighth PR was submitted while those 7 were still running, your CI pipeline would experience some degradation and eventually failing CI runs. Once your organization's usage goes below the limit, Nx Cloud will resume functioning as normal.
## Conclusion
If your repo is starting to grow large enough that CI times are suffering, or if your parallelization strategy is growing too complex to manage effectively, try [setting up Nx Cloud with Distributed Task Execution](/ci/features/distribute-task-execution). You can [generate a simple workflow](/nx-api/workspace/generators/ci-workflow) for common CI providers with the `nx g ci-workflow` command or follow one of the [CI setup recipes](/ci/recipes/set-up).
Organizations that want extra help setting up Nx Cloud or getting the most out of Nx can [sign up for Nx Enterprise](https://nx.app/enterprise/). This package comes with extra support from the Nx team and the option to host Nx Cloud on your own servers.

View File

@ -981,7 +981,7 @@ Learn more about how to [enforce module boundaries](/core-features/enforce-modul
## Setting Up CI ## Setting Up CI
Without adequate tooling, CI times tend to grow exponentially with the size of the codebase. Nx helps decrease the average CI time with the [`affected` command](/ci/features/affected) and Nx Cloud's [distributed caching](/ci/features/remote-cache). Nx also [decreases the worst case CI time](/ci/concepts/dte) with Nx Cloud's distributed task execution. Without adequate tooling, CI times tend to grow exponentially with the size of the codebase. Nx helps reduce wasted time in CI with the [`affected` command](/ci/features/affected) and Nx Cloud's [remote caching](/ci/features/remote-cache). Nx also [efficiently parallelizes tasks across machines](/ci/concepts/parallelization-distribution) with Nx Cloud's distributed task execution.
To set up Nx Cloud run: To set up Nx Cloud run:

View File

@ -1,126 +0,0 @@
# Improve Worst Case CI Times
In this guide we'll show three CI setups and discuss the pros and cons of each. Nx is designed to be dropped in to any setup and immediately show some benefits. There are some setups, however, that are more able to take advantage of the performance boosts that Nx provides.
The CI setups we'll discuss are:
1. Single CI Job
2. Binning
3. Distributed Task Execution
## Approach 1: Single CI Job
Most organizations start their CI as a single job that is responsible for running any tasks that are required. The script for this CI setup using Nx would look something like this:
```yaml
- nx affected -t lint
- nx affected -t test
- nx affected -t build
```
This script will run all lint, test and build targets for projects that are affected by the current PR.
### 🎉 Pro: Simple Setup
This approach is the simplest to setup of the three types. The execution flow is the exact same as if a developer were manually checking everything on their own machine.
### 🎉 Pro: Simple Debugging
Since all the tasks are executed on the same job, all the error logs and build artifacts are located in one place. This makes it easy to find and diagnose errors.
### ⛔️ Con: Slow
This approach works fine for smaller repos, but as the repo grows and the tasks take longer to execute, CI takes longer and longer to run. Nx's affected and computation caching help improve the average CI time, but the worst case CI time will still grow significantly for each project that is added to the repo.
## Approach 2: Binning
To improve the performance of the worst case CI time, you have to implement some sort of parallelization strategy. Binning is a parallelization strategy where there is a planning job that divides the work into bins, one for each agent job. Then every agent executes the work prepared for it. Here is a simplified version of the binning strategy.
```yaml {% fileName="planning-job.yml" %}
# Get the list of affected projects
- nx show projects --affected --json > affected-projects.json
# Store the list of affected projects in a PROJECTS environment variable
# that is accessible to the agent jobs
- node storeAffectedProjects.js
```
```yaml {% fileName="lint-agent.yml" %}
# Run lint for all projects defined in PROJECTS
- nx run-many --projects=$PROJECTS -t lint
```
```yaml {% fileName="test-agent.yml" %}
# Run test for all projects defined in PROJECTS
- nx run-many --projects=$PROJECTS -t test
```
```yaml {% fileName="build-agent.yml" %}
# Run build for all projects defined in PROJECTS
- nx run-many --projects=$PROJECTS -t build
```
Here's a visualization of how this approach works:
![CI using binning](/shared/images/dte/binning.svg)
### 🎉 Pro: Faster
Because there are now three different jobs running the tasks, the worst case CI time is now only as long as the longest group of tasks. (For this scenario, the build tasks usually take the longest.)
### ⛔️ Con: Complex Debugging
With tasks being run on multiple machines, it can be difficult to find where a particular task was run. Tracking down a specific error message or build artifact becomes more and more difficult the more agents are used and the more complex the planning script becomes.
### ⛔️ Con: Difficult to Share Build Artifacts
If one task needs the outputs file of another task, they either need to be run on the same agent job, or you need to create some mechanism to copy the build artifacts from one job to another. Also, the planning script needs to account for all of these task dependencies as it assigns tasks to each agent.
### ⛔️ Con: Complex Setup
This approach requires you to create extra jobs and maintain the script that assigns tasks to each agent job. You could certainly be smarter about assigning tasks to jobs so that you are more optimally dividing work across agent jobs, but that requires making the planning job more complex.
Even if you make the perfect script that correctly divides up the tasks into evenly-sized bins, the repo will continue to change. Tasks that used to take very little time could start to take more time, and someone will need to revisit the script and keep adjusting it to account for the latest timing values for each task.
## Approach 3: Distributed Task Execution with Nx Cloud
Nx Cloud's Distributed Task Execution removes the burden of the complex setup of binning so that you can fully optimize your worst case CI times while maintaining the ease of setup and debug-ability of the single job approach.
The setup looks like this:
```yaml {% fileName="main-job.yml" %}
# Coordinate the agents to run the tasks and stop agents when the build tasks are done
- npx nx-cloud start-ci-run --stop-agents-after=build
# Run any commands you want here
- nx affected -t lint,test,build
```
```yaml {% fileName="agent.yml" %}
# Wait for tasks to execute
- npx nx-cloud start-agent
```
The visualization for distributed task execution looks like this:
![CI using DTE](/shared/images/dte/3agents.svg)
### 🎉 Pro: Fastest
This approach fully optimizes the binning strategy so that tasks are optimally distributed to however many agents are available.
### 🎉 Pro: Easy to Scale
If CI is taking too long, simply increase the number of agent jobs being started in your CI system and Nx will recognize the new agent jobs are available and distribute tasks accordingly. With this approach, your worst case CI time is only limited by your longest running individual task. If you want Nx to automatically provision the agents for you, check out [Nx Agents](/ci/features/nx-agents).
### 🎉 Pro: Build Artifacts
Nx uses the dependency graph to ensure that tasks are executed in the correct order. Nx Cloud then uses the distributed computation cache to make sure that build artifacts from prior tasks are always present for the current task, no matter which agent the tasks were run on. When developing your tasks, you can think of your CI as a single job, even though it is being distributed across an arbitrary number of agents.
### 🎉 Pro: Simpler Debugging
Because Nx uses distributed computation caching to replay all the tasks back on the main job, every log and build artifact is present on that single job. No matter how many agents are used to speed up the CI time, all the debugging information can be found in a single place.
## Conclusion
If your repo is starting to grow large enough that CI times are suffering, or if your parallelization strategy is growing too complex to manage effectively, try [setting up Nx Cloud with Distributed Task Execution](/ci/features/distribute-task-execution). You can generate a simple workflow for common CI providers with a single command and then customize from there.
Nx Cloud is [free for up to 300 CI Pipeline Executions](https://nx.app/pricing/) per month. Most organizations do not exceed the free tier. If you're working on an open source repo, we'll give you a coupon for unlimited free use of Nx Cloud.
Organizations that want extra help setting up Nx Cloud or getting the most out of Nx can [sign up for Nx Enterprise](https://nx.app/enterprise/). This package comes with extra support from the Nx team and the option to host Nx Cloud on your own servers.

View File

@ -25,7 +25,7 @@ When you set up Nx's distributed task execution, your task graph will look more
And not only will CI finish faster, but the debugging experience is the same as if you ran all of your CI on a single And not only will CI finish faster, but the debugging experience is the same as if you ran all of your CI on a single
job. That's because Nx uses distributed caching to recreate all of the logs and build artifacts on the main job. job. That's because Nx uses distributed caching to recreate all of the logs and build artifacts on the main job.
Find more information in this [detailed guide to improve your worst case CI times](/ci/concepts/dte). Find more information in this [guide to parallelization and distribution in CI](/ci/concepts/parallelization-distribution).
## Set up ## Set up

View File

@ -372,7 +372,7 @@ module.exports = withModuleFederation({
Now you can run `nx build host` to build all the `host` and all the implicit dependencies in production mode. Now you can run `nx build host` to build all the `host` and all the implicit dependencies in production mode.
{% callout type="note" title="Distributed caching" %} {% callout type="note" title="Distributed caching" %}
Again, if you don't use [Nx Cloud's Distributed Tasks Execution](/ci/concepts/dte) using Module Federation will be slower Again, if you don't use [Nx Cloud's Distributed Tasks Execution](/ci/features/distribute-task-execution) using Module Federation will be slower
than building everything in a single process. It's only if you enable Distributed Tasks Execution, your CI will be able than building everything in a single process. It's only if you enable Distributed Tasks Execution, your CI will be able
to build each remote on a separate machine, in parallel, (or not build it at all and retrieve it from cache), which will to build each remote on a separate machine, in parallel, (or not build it at all and retrieve it from cache), which will
reduce the CI time. reduce the CI time.

View File

@ -23,7 +23,7 @@ Nx comes with a powerful task scheduler that intelligently runs operations and m
- **Parallelization and task dependencies -** Nx automatically [knows how your projects relate to each other](/concepts/more-concepts/how-project-graph-is-built). As a result, if `project-a` depends on `project-b` and you run the build command for `project-a`, Nx first runs the builds for all of `project-a`'s dependencies and then the invoked project itself. Nx sorts these tasks to maximize parallelism. - **Parallelization and task dependencies -** Nx automatically [knows how your projects relate to each other](/concepts/more-concepts/how-project-graph-is-built). As a result, if `project-a` depends on `project-b` and you run the build command for `project-a`, Nx first runs the builds for all of `project-a`'s dependencies and then the invoked project itself. Nx sorts these tasks to maximize parallelism.
- **Only run what changed -** Using [Nx affected commands](/ci/features/affected) you only really execute tasks on the projects that changed, compared to a given baseline (usually the main branch). - **Only run what changed -** Using [Nx affected commands](/ci/features/affected) you only really execute tasks on the projects that changed, compared to a given baseline (usually the main branch).
- **Caching -** You get Nx's [computation caching](/concepts/how-caching-works) for free. All operations, including artifacts and terminal output are restored from the cache (if present) in a completely transparent way without disrupting your DX. No configuration needed. Obviously this results in an incredible speed improvement. - **Caching -** You get Nx's [computation caching](/concepts/how-caching-works) for free. All operations, including artifacts and terminal output are restored from the cache (if present) in a completely transparent way without disrupting your DX. No configuration needed. Obviously this results in an incredible speed improvement.
- **Distributed Task Execution -** This is unique to Nx. In combination with Nx Cloud your tasks are automatically distributed across CI agents, taking into account build order, maximizing parallelization and thus agent utilization. It even learns from previous runs to better distribute tasks! [Learn more](/ci/concepts/dte) - **Distributed Task Execution -** This is unique to Nx. In combination with Nx Cloud your tasks are automatically distributed across CI agents, taking into account build order, maximizing parallelization and thus agent utilization. It even learns from previous runs to better distribute tasks! [Learn more](/ci/features/distribute-task-execution)
## Integrating Nx with Lerna ## Integrating Nx with Lerna

View File

@ -997,7 +997,7 @@ Learn more about how to [enforce module boundaries](/core-features/enforce-modul
## Setting Up CI ## Setting Up CI
Without adequate tooling, CI times tend to grow exponentially with the size of the codebase. Nx helps decrease the average CI time with the [`affected` command](/ci/features/affected) and Nx Cloud's [distributed caching](/ci/features/remote-cache). Nx also [decreases the worst case CI time](/ci/concepts/dte) with Nx Cloud's distributed task execution. Without adequate tooling, CI times tend to grow exponentially with the size of the codebase. Nx helps reduce wasted time in CI with the [`affected` command](/ci/features/affected) and Nx Cloud's [remote caching](/ci/features/remote-cache). Nx also [efficiently parallelizes tasks across machines](/ci/concepts/parallelization-distribution) with Nx Cloud's distributed task execution.
To set up Nx Cloud run: To set up Nx Cloud run:

View File

@ -276,7 +276,7 @@
- [Nx Agents](/ci/features/nx-agents) - [Nx Agents](/ci/features/nx-agents)
- [Concepts](/ci/concepts) - [Concepts](/ci/concepts)
- [Reduce Waste in CI](/ci/concepts/reduce-waste) - [Reduce Waste in CI](/ci/concepts/reduce-waste)
- [Improve Worst Case CI Times](/ci/concepts/dte) - [Parallelization and Distribution](/ci/concepts/parallelization-distribution)
- [Cache Security](/ci/concepts/cache-security) - [Cache Security](/ci/concepts/cache-security)
- [Recipes](/ci/recipes) - [Recipes](/ci/recipes)
- [Set Up CI](/ci/recipes/set-up) - [Set Up CI](/ci/recipes/set-up)

View File

@ -256,7 +256,7 @@ const diataxis = {
'/ci/monorepo-ci-gitlab': '/recipes/ci/monorepo-ci-gitlab', '/ci/monorepo-ci-gitlab': '/recipes/ci/monorepo-ci-gitlab',
'/ci/monorepo-ci-bitbucket-pipelines': '/ci/monorepo-ci-bitbucket-pipelines':
'/recipes/ci/monorepo-ci-bitbucket-pipelines', '/recipes/ci/monorepo-ci-bitbucket-pipelines',
'/ci/distributed-builds': '/concepts/dte', // 👀 '/ci/distributed-builds': '/nx-cloud/concepts/parallelization-distribution', // 👀
'/ci/incremental-builds': '/concepts/more-concepts/incremental-builds', '/ci/incremental-builds': '/concepts/more-concepts/incremental-builds',
'/ci/setup-incremental-builds-angular': '/ci/setup-incremental-builds-angular':
'/recipes/other/setup-incremental-builds-angular', '/recipes/other/setup-incremental-builds-angular',
@ -441,7 +441,8 @@ const nxCloudUrls = {
'/nx-cloud/recipes/on-premise/auth-saml-managed', '/nx-cloud/recipes/on-premise/auth-saml-managed',
'/nx-cloud/private-cloud/advanced-config': '/nx-cloud/private-cloud/advanced-config':
'/nx-cloud/recipes/on-premise/advanced-config', '/nx-cloud/recipes/on-premise/advanced-config',
'/concepts/dte': '/ci/concepts/dte', '/concepts/dte': '/ci/concepts/parallelization-distribution',
'/nx-cloud/concepts/dte': '/ci/concepts/parallelization-distribution',
'/nx-cloud/intro/nx-cloud-workflows': '/ci/features/nx-cloud-workflows', '/nx-cloud/intro/nx-cloud-workflows': '/ci/features/nx-cloud-workflows',
'/nx-cloud/account': '/ci/recipes/security', '/nx-cloud/account': '/ci/recipes/security',
'/nx-cloud/account/google-auth': '/ci/recipes/security/google-auth', '/nx-cloud/account/google-auth': '/ci/recipes/security/google-auth',