We recommend using Azure Native.
azure.synapse.SparkPool
Explore with Pulumi AI
Manages a Synapse Spark Pool.
Example Usage
// Example: provision a Synapse Spark Pool together with its prerequisites
// (resource group, ADLS Gen2 storage account + filesystem, Synapse workspace).
import * as pulumi from "@pulumi/pulumi";
import * as azure from "@pulumi/azure";
// Resource group that holds all resources in this example.
const example = new azure.core.ResourceGroup("example", {
    name: "example-resources",
    location: "West Europe",
});
// StorageV2 account with hierarchical namespace enabled (required for
// Data Lake Gen2, which Synapse uses as its primary storage).
const exampleAccount = new azure.storage.Account("example", {
    name: "examplestorageacc",
    resourceGroupName: example.name,
    location: example.location,
    accountTier: "Standard",
    accountReplicationType: "LRS",
    accountKind: "StorageV2",
    isHnsEnabled: true,
});
// Data Lake Gen2 filesystem used by the Synapse workspace.
const exampleDataLakeGen2Filesystem = new azure.storage.DataLakeGen2Filesystem("example", {
    name: "example",
    storageAccountId: exampleAccount.id,
});
// Synapse workspace the Spark pool is attached to.
const exampleWorkspace = new azure.synapse.Workspace("example", {
    name: "example",
    resourceGroupName: example.name,
    location: example.location,
    storageDataLakeGen2FilesystemId: exampleDataLakeGen2Filesystem.id,
    sqlAdministratorLogin: "sqladminuser",
    sqlAdministratorLoginPassword: "H@Sh1CoR3!", // NOTE(review): sample password only — use config/secrets in real code.
    identity: {
        type: "SystemAssigned",
    },
});
// Spark pool: autoscaling 3-50 nodes, auto-pause after 15 minutes of
// inactivity, a pip requirements file and extra Spark configuration.
const exampleSparkPool = new azure.synapse.SparkPool("example", {
    name: "example",
    synapseWorkspaceId: exampleWorkspace.id,
    nodeSizeFamily: "MemoryOptimized",
    nodeSize: "Small",
    cacheSize: 100,
    autoScale: {
        maxNodeCount: 50,
        minNodeCount: 3,
    },
    autoPause: {
        delayInMinutes: 15,
    },
    libraryRequirement: {
        content: `appnope==0.1.0
beautifulsoup4==4.6.3
`,
        filename: "requirements.txt",
    },
    sparkConfig: {
        content: "spark.shuffle.spill                true\n",
        filename: "config.txt",
    },
    tags: {
        ENV: "Production",
    },
});
# Example: provision a Synapse Spark Pool together with its prerequisites
# (resource group, ADLS Gen2 storage account + filesystem, Synapse workspace).
import pulumi
import pulumi_azure as azure
# Resource group that holds all resources in this example.
example = azure.core.ResourceGroup("example",
    name="example-resources",
    location="West Europe")
# StorageV2 account with hierarchical namespace enabled (required for
# Data Lake Gen2, which Synapse uses as its primary storage).
example_account = azure.storage.Account("example",
    name="examplestorageacc",
    resource_group_name=example.name,
    location=example.location,
    account_tier="Standard",
    account_replication_type="LRS",
    account_kind="StorageV2",
    is_hns_enabled=True)
# Data Lake Gen2 filesystem used by the Synapse workspace.
example_data_lake_gen2_filesystem = azure.storage.DataLakeGen2Filesystem("example",
    name="example",
    storage_account_id=example_account.id)
# Synapse workspace the Spark pool is attached to.
example_workspace = azure.synapse.Workspace("example",
    name="example",
    resource_group_name=example.name,
    location=example.location,
    storage_data_lake_gen2_filesystem_id=example_data_lake_gen2_filesystem.id,
    sql_administrator_login="sqladminuser",
    sql_administrator_login_password="H@Sh1CoR3!",  # NOTE(review): sample password only — use config/secrets in real code.
    identity=azure.synapse.WorkspaceIdentityArgs(
        type="SystemAssigned",
    ))
# Spark pool: autoscaling 3-50 nodes, auto-pause after 15 minutes of
# inactivity, a pip requirements file and extra Spark configuration.
example_spark_pool = azure.synapse.SparkPool("example",
    name="example",
    synapse_workspace_id=example_workspace.id,
    node_size_family="MemoryOptimized",
    node_size="Small",
    cache_size=100,
    auto_scale=azure.synapse.SparkPoolAutoScaleArgs(
        max_node_count=50,
        min_node_count=3,
    ),
    auto_pause=azure.synapse.SparkPoolAutoPauseArgs(
        delay_in_minutes=15,
    ),
    library_requirement=azure.synapse.SparkPoolLibraryRequirementArgs(
        content="""appnope==0.1.0
beautifulsoup4==4.6.3
""",
        filename="requirements.txt",
    ),
    spark_config=azure.synapse.SparkPoolSparkConfigArgs(
        content="spark.shuffle.spill                true\n",
        filename="config.txt",
    ),
    tags={
        "ENV": "Production",
    })
package main
import (
	"github.com/pulumi/pulumi-azure/sdk/v5/go/azure/core"
	"github.com/pulumi/pulumi-azure/sdk/v5/go/azure/storage"
	"github.com/pulumi/pulumi-azure/sdk/v5/go/azure/synapse"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
// main provisions a resource group, an ADLS Gen2 storage account and
// filesystem, a Synapse workspace, and finally a Synapse Spark Pool.
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Resource group that holds all resources in this example.
		example, err := core.NewResourceGroup(ctx, "example", &core.ResourceGroupArgs{
			Name:     pulumi.String("example-resources"),
			Location: pulumi.String("West Europe"),
		})
		if err != nil {
			return err
		}
		// StorageV2 account with hierarchical namespace enabled (required
		// for Data Lake Gen2, which Synapse uses as its primary storage).
		exampleAccount, err := storage.NewAccount(ctx, "example", &storage.AccountArgs{
			Name:                   pulumi.String("examplestorageacc"),
			ResourceGroupName:      example.Name,
			Location:               example.Location,
			AccountTier:            pulumi.String("Standard"),
			AccountReplicationType: pulumi.String("LRS"),
			AccountKind:            pulumi.String("StorageV2"),
			IsHnsEnabled:           pulumi.Bool(true),
		})
		if err != nil {
			return err
		}
		// Data Lake Gen2 filesystem used by the Synapse workspace.
		exampleDataLakeGen2Filesystem, err := storage.NewDataLakeGen2Filesystem(ctx, "example", &storage.DataLakeGen2FilesystemArgs{
			Name:             pulumi.String("example"),
			StorageAccountId: exampleAccount.ID(),
		})
		if err != nil {
			return err
		}
		// Synapse workspace the Spark pool is attached to.
		// NOTE(review): sample password only — use config/secrets in real code.
		exampleWorkspace, err := synapse.NewWorkspace(ctx, "example", &synapse.WorkspaceArgs{
			Name:                            pulumi.String("example"),
			ResourceGroupName:               example.Name,
			Location:                        example.Location,
			StorageDataLakeGen2FilesystemId: exampleDataLakeGen2Filesystem.ID(),
			SqlAdministratorLogin:           pulumi.String("sqladminuser"),
			SqlAdministratorLoginPassword:   pulumi.String("H@Sh1CoR3!"),
			Identity: &synapse.WorkspaceIdentityArgs{
				Type: pulumi.String("SystemAssigned"),
			},
		})
		if err != nil {
			return err
		}
		// Spark pool: autoscaling 3-50 nodes, auto-pause after 15 minutes
		// of inactivity, a pip requirements file and extra Spark config.
		_, err = synapse.NewSparkPool(ctx, "example", &synapse.SparkPoolArgs{
			Name:               pulumi.String("example"),
			SynapseWorkspaceId: exampleWorkspace.ID(),
			NodeSizeFamily:     pulumi.String("MemoryOptimized"),
			NodeSize:           pulumi.String("Small"),
			CacheSize:          pulumi.Int(100),
			AutoScale: &synapse.SparkPoolAutoScaleArgs{
				MaxNodeCount: pulumi.Int(50),
				MinNodeCount: pulumi.Int(3),
			},
			AutoPause: &synapse.SparkPoolAutoPauseArgs{
				DelayInMinutes: pulumi.Int(15),
			},
			LibraryRequirement: &synapse.SparkPoolLibraryRequirementArgs{
				Content:  pulumi.String("appnope==0.1.0\nbeautifulsoup4==4.6.3\n"),
				Filename: pulumi.String("requirements.txt"),
			},
			SparkConfig: &synapse.SparkPoolSparkConfigArgs{
				Content:  pulumi.String("spark.shuffle.spill                true\n"),
				Filename: pulumi.String("config.txt"),
			},
			Tags: pulumi.StringMap{
				"ENV": pulumi.String("Production"),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
// Example: provision a Synapse Spark Pool together with its prerequisites
// (resource group, ADLS Gen2 storage account + filesystem, Synapse workspace).
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Azure = Pulumi.Azure;
return await Deployment.RunAsync(() => 
{
    // Resource group that holds all resources in this example.
    var example = new Azure.Core.ResourceGroup("example", new()
    {
        Name = "example-resources",
        Location = "West Europe",
    });
    // StorageV2 account with hierarchical namespace enabled (required for
    // Data Lake Gen2, which Synapse uses as its primary storage).
    var exampleAccount = new Azure.Storage.Account("example", new()
    {
        Name = "examplestorageacc",
        ResourceGroupName = example.Name,
        Location = example.Location,
        AccountTier = "Standard",
        AccountReplicationType = "LRS",
        AccountKind = "StorageV2",
        IsHnsEnabled = true,
    });
    // Data Lake Gen2 filesystem used by the Synapse workspace.
    var exampleDataLakeGen2Filesystem = new Azure.Storage.DataLakeGen2Filesystem("example", new()
    {
        Name = "example",
        StorageAccountId = exampleAccount.Id,
    });
    // Synapse workspace the Spark pool is attached to.
    var exampleWorkspace = new Azure.Synapse.Workspace("example", new()
    {
        Name = "example",
        ResourceGroupName = example.Name,
        Location = example.Location,
        StorageDataLakeGen2FilesystemId = exampleDataLakeGen2Filesystem.Id,
        SqlAdministratorLogin = "sqladminuser",
        SqlAdministratorLoginPassword = "H@Sh1CoR3!", // NOTE(review): sample password only — use config/secrets in real code.
        Identity = new Azure.Synapse.Inputs.WorkspaceIdentityArgs
        {
            Type = "SystemAssigned",
        },
    });
    // Spark pool: autoscaling 3-50 nodes, auto-pause after 15 minutes of
    // inactivity, a pip requirements file and extra Spark configuration.
    var exampleSparkPool = new Azure.Synapse.SparkPool("example", new()
    {
        Name = "example",
        SynapseWorkspaceId = exampleWorkspace.Id,
        NodeSizeFamily = "MemoryOptimized",
        NodeSize = "Small",
        CacheSize = 100,
        AutoScale = new Azure.Synapse.Inputs.SparkPoolAutoScaleArgs
        {
            MaxNodeCount = 50,
            MinNodeCount = 3,
        },
        AutoPause = new Azure.Synapse.Inputs.SparkPoolAutoPauseArgs
        {
            DelayInMinutes = 15,
        },
        LibraryRequirement = new Azure.Synapse.Inputs.SparkPoolLibraryRequirementArgs
        {
            Content = @"appnope==0.1.0
beautifulsoup4==4.6.3
",
            Filename = "requirements.txt",
        },
        SparkConfig = new Azure.Synapse.Inputs.SparkPoolSparkConfigArgs
        {
            Content = @"spark.shuffle.spill                true
",
            Filename = "config.txt",
        },
        Tags = 
        {
            { "ENV", "Production" },
        },
    });
});
package generated_program;
import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.azure.core.ResourceGroup;
import com.pulumi.azure.core.ResourceGroupArgs;
import com.pulumi.azure.storage.Account;
import com.pulumi.azure.storage.AccountArgs;
import com.pulumi.azure.storage.DataLakeGen2Filesystem;
import com.pulumi.azure.storage.DataLakeGen2FilesystemArgs;
import com.pulumi.azure.synapse.Workspace;
import com.pulumi.azure.synapse.WorkspaceArgs;
import com.pulumi.azure.synapse.inputs.WorkspaceIdentityArgs;
import com.pulumi.azure.synapse.SparkPool;
import com.pulumi.azure.synapse.SparkPoolArgs;
import com.pulumi.azure.synapse.inputs.SparkPoolAutoScaleArgs;
import com.pulumi.azure.synapse.inputs.SparkPoolAutoPauseArgs;
import com.pulumi.azure.synapse.inputs.SparkPoolLibraryRequirementArgs;
import com.pulumi.azure.synapse.inputs.SparkPoolSparkConfigArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }
    /**
     * Provisions a resource group, an ADLS Gen2 storage account and
     * filesystem, a Synapse workspace, and a Synapse Spark Pool with
     * autoscaling, auto-pause, a pip requirements file and Spark config.
     */
    public static void stack(Context ctx) {
        // Resource group that holds all resources in this example.
        var example = new ResourceGroup("example", ResourceGroupArgs.builder()
            .name("example-resources")
            .location("West Europe")
            .build());
        // StorageV2 account with hierarchical namespace enabled (required
        // for Data Lake Gen2). isHnsEnabled expects a Boolean — passing the
        // string "true" does not match the builder's Boolean overloads.
        var exampleAccount = new Account("exampleAccount", AccountArgs.builder()
            .name("examplestorageacc")
            .resourceGroupName(example.name())
            .location(example.location())
            .accountTier("Standard")
            .accountReplicationType("LRS")
            .accountKind("StorageV2")
            .isHnsEnabled(true)
            .build());
        // Data Lake Gen2 filesystem used by the Synapse workspace.
        var exampleDataLakeGen2Filesystem = new DataLakeGen2Filesystem("exampleDataLakeGen2Filesystem", DataLakeGen2FilesystemArgs.builder()
            .name("example")
            .storageAccountId(exampleAccount.id())
            .build());
        // Synapse workspace the Spark pool is attached to.
        // NOTE(review): sample password only — use config/secrets in real code.
        var exampleWorkspace = new Workspace("exampleWorkspace", WorkspaceArgs.builder()
            .name("example")
            .resourceGroupName(example.name())
            .location(example.location())
            .storageDataLakeGen2FilesystemId(exampleDataLakeGen2Filesystem.id())
            .sqlAdministratorLogin("sqladminuser")
            .sqlAdministratorLoginPassword("H@Sh1CoR3!")
            .identity(WorkspaceIdentityArgs.builder()
                .type("SystemAssigned")
                .build())
            .build());
        // Spark pool: autoscaling 3-50 nodes, auto-pause after 15 minutes
        // of inactivity, a pip requirements file and extra Spark config.
        var exampleSparkPool = new SparkPool("exampleSparkPool", SparkPoolArgs.builder()
            .name("example")
            .synapseWorkspaceId(exampleWorkspace.id())
            .nodeSizeFamily("MemoryOptimized")
            .nodeSize("Small")
            .cacheSize(100)
            .autoScale(SparkPoolAutoScaleArgs.builder()
                .maxNodeCount(50)
                .minNodeCount(3)
                .build())
            .autoPause(SparkPoolAutoPauseArgs.builder()
                .delayInMinutes(15)
                .build())
            .libraryRequirement(SparkPoolLibraryRequirementArgs.builder()
                .content("""
appnope==0.1.0
beautifulsoup4==4.6.3
                """)
                .filename("requirements.txt")
                .build())
            .sparkConfig(SparkPoolSparkConfigArgs.builder()
                .content("""
spark.shuffle.spill                true
                """)
                .filename("config.txt")
                .build())
            .tags(Map.of("ENV", "Production"))
            .build());
    }
}
# Example: Synapse Spark Pool with autoscaling, auto-pause, a pip
# requirements file and custom Spark configuration, plus prerequisites.
resources:
  example:
    type: azure:core:ResourceGroup
    properties:
      name: example-resources
      location: West Europe
  exampleAccount:
    type: azure:storage:Account
    name: example
    properties:
      name: examplestorageacc
      resourceGroupName: ${example.name}
      location: ${example.location}
      accountTier: Standard
      accountReplicationType: LRS
      accountKind: StorageV2
      # isHnsEnabled is a boolean input; use a real boolean, not the
      # string 'true' (matches every other language example).
      isHnsEnabled: true
  exampleDataLakeGen2Filesystem:
    type: azure:storage:DataLakeGen2Filesystem
    name: example
    properties:
      name: example
      storageAccountId: ${exampleAccount.id}
  exampleWorkspace:
    type: azure:synapse:Workspace
    name: example
    properties:
      name: example
      resourceGroupName: ${example.name}
      location: ${example.location}
      storageDataLakeGen2FilesystemId: ${exampleDataLakeGen2Filesystem.id}
      sqlAdministratorLogin: sqladminuser
      # NOTE(review): sample password only — use config/secrets in real code.
      sqlAdministratorLoginPassword: H@Sh1CoR3!
      identity:
        type: SystemAssigned
  exampleSparkPool:
    type: azure:synapse:SparkPool
    name: example
    properties:
      name: example
      synapseWorkspaceId: ${exampleWorkspace.id}
      nodeSizeFamily: MemoryOptimized
      nodeSize: Small
      cacheSize: 100
      autoScale:
        maxNodeCount: 50
        minNodeCount: 3
      autoPause:
        delayInMinutes: 15
      libraryRequirement:
        # Trailing whitespace removed from the block scalar so the rendered
        # requirements file matches the other language examples exactly.
        content: |
          appnope==0.1.0
          beautifulsoup4==4.6.3
        filename: requirements.txt
      sparkConfig:
        content: |
          spark.shuffle.spill                true
        filename: config.txt
      tags:
        ENV: Production
Create SparkPool Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new SparkPool(name: string, args: SparkPoolArgs, opts?: CustomResourceOptions);
@overload
def SparkPool(resource_name: str,
              args: SparkPoolArgs,
              opts: Optional[ResourceOptions] = None)
@overload
def SparkPool(resource_name: str,
              opts: Optional[ResourceOptions] = None,
              node_size: Optional[str] = None,
              synapse_workspace_id: Optional[str] = None,
              node_size_family: Optional[str] = None,
              name: Optional[str] = None,
              compute_isolation_enabled: Optional[bool] = None,
              library_requirement: Optional[SparkPoolLibraryRequirementArgs] = None,
              max_executors: Optional[int] = None,
              min_executors: Optional[int] = None,
              auto_pause: Optional[SparkPoolAutoPauseArgs] = None,
              node_count: Optional[int] = None,
              dynamic_executor_allocation_enabled: Optional[bool] = None,
              cache_size: Optional[int] = None,
              session_level_packages_enabled: Optional[bool] = None,
              spark_config: Optional[SparkPoolSparkConfigArgs] = None,
              spark_events_folder: Optional[str] = None,
              spark_log_folder: Optional[str] = None,
              spark_version: Optional[str] = None,
              auto_scale: Optional[SparkPoolAutoScaleArgs] = None,
              tags: Optional[Mapping[str, str]] = None)
func NewSparkPool(ctx *Context, name string, args SparkPoolArgs, opts ...ResourceOption) (*SparkPool, error)
public SparkPool(string name, SparkPoolArgs args, CustomResourceOptions? opts = null)
public SparkPool(String name, SparkPoolArgs args)
public SparkPool(String name, SparkPoolArgs args, CustomResourceOptions options)
type: azure:synapse:SparkPool
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
 - The unique name of the resource.
 - args SparkPoolArgs
 - The arguments to resource properties.
 - opts CustomResourceOptions
 - Bag of options to control resource's behavior.
 
- resource_name str
 - The unique name of the resource.
 - args SparkPoolArgs
 - The arguments to resource properties.
 - opts ResourceOptions
 - Bag of options to control resource's behavior.
 
- ctx Context
 - Context object for the current deployment.
 - name string
 - The unique name of the resource.
 - args SparkPoolArgs
 - The arguments to resource properties.
 - opts ResourceOption
 - Bag of options to control resource's behavior.
 
- name string
 - The unique name of the resource.
 - args SparkPoolArgs
 - The arguments to resource properties.
 - opts CustomResourceOptions
 - Bag of options to control resource's behavior.
 
- name String
 - The unique name of the resource.
 - args SparkPoolArgs
 - The arguments to resource properties.
 - options CustomResourceOptions
 - Bag of options to control resource's behavior.
 
Constructor example
The following reference example uses placeholder values for all input properties.
// Reference example: every SparkPool input shown with a placeholder value.
var sparkPoolResource = new Azure.Synapse.SparkPool("sparkPoolResource", new()
{
    NodeSize = "string",
    SynapseWorkspaceId = "string",
    NodeSizeFamily = "string",
    Name = "string",
    ComputeIsolationEnabled = false,
    LibraryRequirement = new Azure.Synapse.Inputs.SparkPoolLibraryRequirementArgs
    {
        Content = "string",
        Filename = "string",
    },
    MaxExecutors = 0,
    MinExecutors = 0,
    AutoPause = new Azure.Synapse.Inputs.SparkPoolAutoPauseArgs
    {
        DelayInMinutes = 0,
    },
    NodeCount = 0,
    DynamicExecutorAllocationEnabled = false,
    CacheSize = 0,
    SessionLevelPackagesEnabled = false,
    SparkConfig = new Azure.Synapse.Inputs.SparkPoolSparkConfigArgs
    {
        Content = "string",
        Filename = "string",
    },
    SparkEventsFolder = "string",
    SparkLogFolder = "string",
    SparkVersion = "string",
    AutoScale = new Azure.Synapse.Inputs.SparkPoolAutoScaleArgs
    {
        MaxNodeCount = 0,
        MinNodeCount = 0,
    },
    Tags = 
    {
        { "string", "string" },
    },
});
// Reference example: every SparkPool input shown with a placeholder value.
example, err := synapse.NewSparkPool(ctx, "sparkPoolResource", &synapse.SparkPoolArgs{
	NodeSize:                pulumi.String("string"),
	SynapseWorkspaceId:      pulumi.String("string"),
	NodeSizeFamily:          pulumi.String("string"),
	Name:                    pulumi.String("string"),
	ComputeIsolationEnabled: pulumi.Bool(false),
	LibraryRequirement: &synapse.SparkPoolLibraryRequirementArgs{
		Content:  pulumi.String("string"),
		Filename: pulumi.String("string"),
	},
	MaxExecutors: pulumi.Int(0),
	MinExecutors: pulumi.Int(0),
	AutoPause: &synapse.SparkPoolAutoPauseArgs{
		DelayInMinutes: pulumi.Int(0),
	},
	NodeCount:                        pulumi.Int(0),
	DynamicExecutorAllocationEnabled: pulumi.Bool(false),
	CacheSize:                        pulumi.Int(0),
	SessionLevelPackagesEnabled:      pulumi.Bool(false),
	SparkConfig: &synapse.SparkPoolSparkConfigArgs{
		Content:  pulumi.String("string"),
		Filename: pulumi.String("string"),
	},
	SparkEventsFolder: pulumi.String("string"),
	SparkLogFolder:    pulumi.String("string"),
	SparkVersion:      pulumi.String("string"),
	AutoScale: &synapse.SparkPoolAutoScaleArgs{
		MaxNodeCount: pulumi.Int(0),
		MinNodeCount: pulumi.Int(0),
	},
	Tags: pulumi.StringMap{
		"string": pulumi.String("string"),
	},
})
// Reference example: every SparkPool input shown with a placeholder value.
var sparkPoolResource = new SparkPool("sparkPoolResource", SparkPoolArgs.builder()
    .nodeSize("string")
    .synapseWorkspaceId("string")
    .nodeSizeFamily("string")
    .name("string")
    .computeIsolationEnabled(false)
    .libraryRequirement(SparkPoolLibraryRequirementArgs.builder()
        .content("string")
        .filename("string")
        .build())
    .maxExecutors(0)
    .minExecutors(0)
    .autoPause(SparkPoolAutoPauseArgs.builder()
        .delayInMinutes(0)
        .build())
    .nodeCount(0)
    .dynamicExecutorAllocationEnabled(false)
    .cacheSize(0)
    .sessionLevelPackagesEnabled(false)
    .sparkConfig(SparkPoolSparkConfigArgs.builder()
        .content("string")
        .filename("string")
        .build())
    .sparkEventsFolder("string")
    .sparkLogFolder("string")
    .sparkVersion("string")
    .autoScale(SparkPoolAutoScaleArgs.builder()
        .maxNodeCount(0)
        .minNodeCount(0)
        .build())
    .tags(Map.of("string", "string"))
    .build());
# Reference example: every SparkPool input shown with a placeholder value.
spark_pool_resource = azure.synapse.SparkPool("sparkPoolResource",
    node_size="string",
    synapse_workspace_id="string",
    node_size_family="string",
    name="string",
    compute_isolation_enabled=False,
    library_requirement=azure.synapse.SparkPoolLibraryRequirementArgs(
        content="string",
        filename="string",
    ),
    max_executors=0,
    min_executors=0,
    auto_pause=azure.synapse.SparkPoolAutoPauseArgs(
        delay_in_minutes=0,
    ),
    node_count=0,
    dynamic_executor_allocation_enabled=False,
    cache_size=0,
    session_level_packages_enabled=False,
    spark_config=azure.synapse.SparkPoolSparkConfigArgs(
        content="string",
        filename="string",
    ),
    spark_events_folder="string",
    spark_log_folder="string",
    spark_version="string",
    auto_scale=azure.synapse.SparkPoolAutoScaleArgs(
        max_node_count=0,
        min_node_count=0,
    ),
    tags={
        "string": "string",
    })
// Reference example: every SparkPool input shown with a placeholder value.
const sparkPoolResource = new azure.synapse.SparkPool("sparkPoolResource", {
    nodeSize: "string",
    synapseWorkspaceId: "string",
    nodeSizeFamily: "string",
    name: "string",
    computeIsolationEnabled: false,
    libraryRequirement: {
        content: "string",
        filename: "string",
    },
    maxExecutors: 0,
    minExecutors: 0,
    autoPause: {
        delayInMinutes: 0,
    },
    nodeCount: 0,
    dynamicExecutorAllocationEnabled: false,
    cacheSize: 0,
    sessionLevelPackagesEnabled: false,
    sparkConfig: {
        content: "string",
        filename: "string",
    },
    sparkEventsFolder: "string",
    sparkLogFolder: "string",
    sparkVersion: "string",
    autoScale: {
        maxNodeCount: 0,
        minNodeCount: 0,
    },
    tags: {
        string: "string",
    },
});
# Reference example: every SparkPool input shown with a placeholder value.
type: azure:synapse:SparkPool
properties:
    autoPause:
        delayInMinutes: 0
    autoScale:
        maxNodeCount: 0
        minNodeCount: 0
    cacheSize: 0
    computeIsolationEnabled: false
    dynamicExecutorAllocationEnabled: false
    libraryRequirement:
        content: string
        filename: string
    maxExecutors: 0
    minExecutors: 0
    name: string
    nodeCount: 0
    nodeSize: string
    nodeSizeFamily: string
    sessionLevelPackagesEnabled: false
    sparkConfig:
        content: string
        filename: string
    sparkEventsFolder: string
    sparkLogFolder: string
    sparkVersion: string
    synapseWorkspaceId: string
    tags:
        string: string
SparkPool Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
The SparkPool resource accepts the following input properties:
- NodeSize string - The level of node in the Spark Pool. Possible values are Small, Medium, Large, None, XLarge, XXLarge and XXXLarge.
- NodeSizeFamily string - The kind of nodes that the Spark Pool provides. Possible values are HardwareAcceleratedFPGA, HardwareAcceleratedGPU, MemoryOptimized, and None.
- SynapseWorkspaceId string - The ID of the Synapse Workspace where the Synapse Spark Pool should exist. Changing this forces a new Synapse Spark Pool to be created.
 - Auto
Pause SparkPool Auto Pause  - An 
auto_pauseblock as defined below. - Auto
Scale SparkPool Auto Scale  - An 
auto_scaleblock as defined below. Exactly one ofnode_countorauto_scalemust be specified. - Cache
Size int - The cache size in the Spark Pool.
 - Compute
Isolation boolEnabled  - Indicates whether compute isolation is enabled or not. Defaults to 
false. - Dynamic
Executor boolAllocation Enabled  - Library
Requirement SparkPool Library Requirement  - Max
Executors int - Min
Executors int - Name string
 - The name which should be used for this Synapse Spark Pool. Changing this forces a new Synapse Spark Pool to be created.
 - Node
Count int - The number of nodes in the Spark Pool. Exactly one of 
node_countorauto_scalemust be specified. - Session
Level boolPackages Enabled  - Spark
Config SparkPool Spark Config  - Spark
Events stringFolder  - Spark
Log stringFolder  - Spark
Version string - Dictionary<string, string>
 
- Node
Size string - The level of node in the Spark Pool. Possible values are 
Small,Medium,Large,None,XLarge,XXLargeandXXXLarge. - Node
Size stringFamily  - The kind of nodes that the Spark Pool provides. Possible values are 
HardwareAcceleratedFPGA,HardwareAcceleratedGPU,MemoryOptimized, andNone. - Synapse
Workspace stringId  - The ID of the Synapse Workspace where the Synapse Spark Pool should exist. Changing this forces a new Synapse Spark Pool to be created.
 - Auto
Pause SparkPool Auto Pause Args  - An 
auto_pauseblock as defined below. - Auto
Scale SparkPool Auto Scale Args  - An 
auto_scaleblock as defined below. Exactly one ofnode_countorauto_scalemust be specified. - Cache
Size int - The cache size in the Spark Pool.
 - Compute
Isolation boolEnabled  - Indicates whether compute isolation is enabled or not. Defaults to 
false. - Dynamic
Executor boolAllocation Enabled  - Library
Requirement SparkPool Library Requirement Args  - Max
Executors int - Min
Executors int - Name string
 - The name which should be used for this Synapse Spark Pool. Changing this forces a new Synapse Spark Pool to be created.
 - Node
Count int - The number of nodes in the Spark Pool. Exactly one of 
node_countorauto_scalemust be specified. - Session
Level boolPackages Enabled  - Spark
Config SparkPool Spark Config Args  - Spark
Events stringFolder  - Spark
Log stringFolder  - Spark
Version string - map[string]string
 
- node
Size String - The level of node in the Spark Pool. Possible values are 
Small,Medium,Large,None,XLarge,XXLargeandXXXLarge. - node
Size StringFamily  - The kind of nodes that the Spark Pool provides. Possible values are 
HardwareAcceleratedFPGA,HardwareAcceleratedGPU,MemoryOptimized, andNone. - synapse
Workspace StringId  - The ID of the Synapse Workspace where the Synapse Spark Pool should exist. Changing this forces a new Synapse Spark Pool to be created.
 - auto
Pause SparkPool Auto Pause  - An 
auto_pauseblock as defined below. - auto
Scale SparkPool Auto Scale  - An 
auto_scaleblock as defined below. Exactly one ofnode_countorauto_scalemust be specified. - cache
Size Integer - The cache size in the Spark Pool.
 - compute
Isolation BooleanEnabled  - Indicates whether compute isolation is enabled or not. Defaults to 
false. - dynamic
Executor BooleanAllocation Enabled  - library
Requirement SparkPool Library Requirement  - max
Executors Integer - min
Executors Integer - name String
 - The name which should be used for this Synapse Spark Pool. Changing this forces a new Synapse Spark Pool to be created.
 - node
Count Integer - The number of nodes in the Spark Pool. Exactly one of 
node_countorauto_scalemust be specified. - session
Level BooleanPackages Enabled  - spark
Config SparkPool Spark Config  - spark
Events StringFolder  - spark
Log StringFolder  - spark
Version String - Map<String,String>
 
- node
Size string - The level of node in the Spark Pool. Possible values are 
Small,Medium,Large,None,XLarge,XXLargeandXXXLarge. - node
Size stringFamily  - The kind of nodes that the Spark Pool provides. Possible values are 
HardwareAcceleratedFPGA,HardwareAcceleratedGPU,MemoryOptimized, andNone. - synapse
Workspace stringId  - The ID of the Synapse Workspace where the Synapse Spark Pool should exist. Changing this forces a new Synapse Spark Pool to be created.
 - auto
Pause SparkPool Auto Pause  - An 
auto_pauseblock as defined below. - auto
Scale SparkPool Auto Scale  - An 
auto_scaleblock as defined below. Exactly one ofnode_countorauto_scalemust be specified. - cache
Size number - The cache size in the Spark Pool.
 - compute
Isolation booleanEnabled  - Indicates whether compute isolation is enabled or not. Defaults to 
false. - dynamic
Executor booleanAllocation Enabled  - library
Requirement SparkPool Library Requirement  - max
Executors number - min
Executors number - name string
 - The name which should be used for this Synapse Spark Pool. Changing this forces a new Synapse Spark Pool to be created.
 - node
Count number - The number of nodes in the Spark Pool. Exactly one of 
node_countorauto_scalemust be specified. - session
Level booleanPackages Enabled  - spark
Config SparkPool Spark Config  - spark
Events stringFolder  - spark
Log stringFolder  - spark
Version string - {[key: string]: string}
 
- node_
size str - The level of node in the Spark Pool. Possible values are 
Small,Medium,Large,None,XLarge,XXLargeandXXXLarge. - node_
size_ strfamily  - The kind of nodes that the Spark Pool provides. Possible values are 
HardwareAcceleratedFPGA,HardwareAcceleratedGPU,MemoryOptimized, andNone. - synapse_
workspace_ strid  - The ID of the Synapse Workspace where the Synapse Spark Pool should exist. Changing this forces a new Synapse Spark Pool to be created.
 - auto_
pause SparkPool Auto Pause Args  - An 
auto_pauseblock as defined below. - auto_
scale SparkPool Auto Scale Args  - An 
auto_scaleblock as defined below. Exactly one ofnode_countorauto_scalemust be specified. - cache_
size int - The cache size in the Spark Pool.
 - compute_
isolation_ boolenabled  - Indicates whether compute isolation is enabled or not. Defaults to 
false. - dynamic_
executor_ boolallocation_ enabled  - library_
requirement SparkPool Library Requirement Args  - max_
executors int - min_
executors int - name str
 - The name which should be used for this Synapse Spark Pool. Changing this forces a new Synapse Spark Pool to be created.
 - node_
count int - The number of nodes in the Spark Pool. Exactly one of 
node_countorauto_scalemust be specified. - session_
level_ boolpackages_ enabled  - spark_
config SparkPool Spark Config Args  - spark_
events_ strfolder  - spark_
log_ strfolder  - spark_
version str - Mapping[str, str]
 
- node
Size String - The level of node in the Spark Pool. Possible values are 
Small,Medium,Large,None,XLarge,XXLargeandXXXLarge. - node
Size StringFamily  - The kind of nodes that the Spark Pool provides. Possible values are 
HardwareAcceleratedFPGA,HardwareAcceleratedGPU,MemoryOptimized, andNone. - synapse
Workspace StringId  - The ID of the Synapse Workspace where the Synapse Spark Pool should exist. Changing this forces a new Synapse Spark Pool to be created.
 - auto
Pause Property Map - An 
auto_pauseblock as defined below. - auto
Scale Property Map - An 
auto_scaleblock as defined below. Exactly one ofnode_countorauto_scalemust be specified. - cache
Size Number - The cache size in the Spark Pool.
 - compute
Isolation BooleanEnabled  - Indicates whether compute isolation is enabled or not. Defaults to 
false. - dynamic
Executor BooleanAllocation Enabled  - library
Requirement Property Map - max
Executors Number - min
Executors Number - name String
 - The name which should be used for this Synapse Spark Pool. Changing this forces a new Synapse Spark Pool to be created.
 - node
Count Number - The number of nodes in the Spark Pool. Exactly one of 
node_countorauto_scalemust be specified. - session
Level BooleanPackages Enabled  - spark
Config Property Map - spark
Events StringFolder  - spark
Log StringFolder  - spark
Version String - Map<String>
 
Outputs
All input properties are implicitly available as output properties. Additionally, the SparkPool resource produces the following output properties:
- Id string
 - The provider-assigned unique ID for this managed resource.
 
- Id string
 - The provider-assigned unique ID for this managed resource.
 
- id String
 - The provider-assigned unique ID for this managed resource.
 
- id string
 - The provider-assigned unique ID for this managed resource.
 
- id str
 - The provider-assigned unique ID for this managed resource.
 
- id String
 - The provider-assigned unique ID for this managed resource.
 
Look up Existing SparkPool Resource
Get an existing SparkPool resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: SparkPoolState, opts?: CustomResourceOptions): SparkPool@staticmethod
def get(resource_name: str,
        id: str,
        opts: Optional[ResourceOptions] = None,
        auto_pause: Optional[SparkPoolAutoPauseArgs] = None,
        auto_scale: Optional[SparkPoolAutoScaleArgs] = None,
        cache_size: Optional[int] = None,
        compute_isolation_enabled: Optional[bool] = None,
        dynamic_executor_allocation_enabled: Optional[bool] = None,
        library_requirement: Optional[SparkPoolLibraryRequirementArgs] = None,
        max_executors: Optional[int] = None,
        min_executors: Optional[int] = None,
        name: Optional[str] = None,
        node_count: Optional[int] = None,
        node_size: Optional[str] = None,
        node_size_family: Optional[str] = None,
        session_level_packages_enabled: Optional[bool] = None,
        spark_config: Optional[SparkPoolSparkConfigArgs] = None,
        spark_events_folder: Optional[str] = None,
        spark_log_folder: Optional[str] = None,
        spark_version: Optional[str] = None,
        synapse_workspace_id: Optional[str] = None,
        tags: Optional[Mapping[str, str]] = None) -> SparkPoolfunc GetSparkPool(ctx *Context, name string, id IDInput, state *SparkPoolState, opts ...ResourceOption) (*SparkPool, error)public static SparkPool Get(string name, Input<string> id, SparkPoolState? state, CustomResourceOptions? opts = null)public static SparkPool get(String name, Output<String> id, SparkPoolState state, CustomResourceOptions options)Resource lookup is not supported in YAML- name
 - The unique name of the resulting resource.
 - id
 - The unique provider ID of the resource to lookup.
 - state
 - Any extra arguments used during the lookup.
 - opts
 - A bag of options that control this resource's behavior.
 
- resource_name
 - The unique name of the resulting resource.
 - id
 - The unique provider ID of the resource to lookup.
 
- name
 - The unique name of the resulting resource.
 - id
 - The unique provider ID of the resource to lookup.
 - state
 - Any extra arguments used during the lookup.
 - opts
 - A bag of options that control this resource's behavior.
 
- name
 - The unique name of the resulting resource.
 - id
 - The unique provider ID of the resource to lookup.
 - state
 - Any extra arguments used during the lookup.
 - opts
 - A bag of options that control this resource's behavior.
 
- name
 - The unique name of the resulting resource.
 - id
 - The unique provider ID of the resource to lookup.
 - state
 - Any extra arguments used during the lookup.
 - opts
 - A bag of options that control this resource's behavior.
 
- Auto
Pause SparkPool Auto Pause  - An 
auto_pause block as defined below. - Auto
Scale SparkPool Auto Scale  - An 
auto_scale block as defined below. Exactly one of node_count or auto_scale must be specified. - Cache
Size int - The cache size in the Spark Pool.
 - Compute
Isolation boolEnabled  - Indicates whether compute isolation is enabled or not. Defaults to 
false. - Dynamic
Executor boolAllocation Enabled  - Library
Requirement SparkPool Library Requirement  - Max
Executors int - Min
Executors int - Name string
 - The name which should be used for this Synapse Spark Pool. Changing this forces a new Synapse Spark Pool to be created.
 - Node
Count int - The number of nodes in the Spark Pool. Exactly one of 
node_count or auto_scale must be specified. - Node
Size string - The level of node in the Spark Pool. Possible values are 
Small, Medium, Large, None, XLarge, XXLarge and XXXLarge. - Node
Size stringFamily  - The kind of nodes that the Spark Pool provides. Possible values are 
HardwareAcceleratedFPGA, HardwareAcceleratedGPU, MemoryOptimized, and None. - Session
Level boolPackages Enabled  - Spark
Config SparkPool Spark Config  - Spark
Events stringFolder  - Spark
Log stringFolder  - Spark
Version string - Synapse
Workspace stringId  - The ID of the Synapse Workspace where the Synapse Spark Pool should exist. Changing this forces a new Synapse Spark Pool to be created.
 - Dictionary<string, string>
 
- Auto
Pause SparkPool Auto Pause Args  - An 
auto_pause block as defined below. - Auto
Scale SparkPool Auto Scale Args  - An 
auto_scale block as defined below. Exactly one of node_count or auto_scale must be specified. - Cache
Size int - The cache size in the Spark Pool.
 - Compute
Isolation boolEnabled  - Indicates whether compute isolation is enabled or not. Defaults to 
false. - Dynamic
Executor boolAllocation Enabled  - Library
Requirement SparkPool Library Requirement Args  - Max
Executors int - Min
Executors int - Name string
 - The name which should be used for this Synapse Spark Pool. Changing this forces a new Synapse Spark Pool to be created.
 - Node
Count int - The number of nodes in the Spark Pool. Exactly one of 
node_count or auto_scale must be specified. - Node
Size string - The level of node in the Spark Pool. Possible values are 
Small, Medium, Large, None, XLarge, XXLarge and XXXLarge. - Node
Size stringFamily  - The kind of nodes that the Spark Pool provides. Possible values are 
HardwareAcceleratedFPGA, HardwareAcceleratedGPU, MemoryOptimized, and None. - Session
Level boolPackages Enabled  - Spark
Config SparkPool Spark Config Args  - Spark
Events stringFolder  - Spark
Log stringFolder  - Spark
Version string - Synapse
Workspace stringId  - The ID of the Synapse Workspace where the Synapse Spark Pool should exist. Changing this forces a new Synapse Spark Pool to be created.
 - map[string]string
 
- auto
Pause SparkPool Auto Pause  - An 
auto_pause block as defined below. - auto
Scale SparkPool Auto Scale  - An 
auto_scale block as defined below. Exactly one of node_count or auto_scale must be specified. - cache
Size Integer - The cache size in the Spark Pool.
 - compute
Isolation BooleanEnabled  - Indicates whether compute isolation is enabled or not. Defaults to 
false. - dynamic
Executor BooleanAllocation Enabled  - library
Requirement SparkPool Library Requirement  - max
Executors Integer - min
Executors Integer - name String
 - The name which should be used for this Synapse Spark Pool. Changing this forces a new Synapse Spark Pool to be created.
 - node
Count Integer - The number of nodes in the Spark Pool. Exactly one of 
node_count or auto_scale must be specified. - node
Size String - The level of node in the Spark Pool. Possible values are 
Small, Medium, Large, None, XLarge, XXLarge and XXXLarge. - node
Size StringFamily  - The kind of nodes that the Spark Pool provides. Possible values are 
HardwareAcceleratedFPGA, HardwareAcceleratedGPU, MemoryOptimized, and None. - session
Level BooleanPackages Enabled  - spark
Config SparkPool Spark Config  - spark
Events StringFolder  - spark
Log StringFolder  - spark
Version String - synapse
Workspace StringId  - The ID of the Synapse Workspace where the Synapse Spark Pool should exist. Changing this forces a new Synapse Spark Pool to be created.
 - Map<String,String>
 
- auto
Pause SparkPool Auto Pause  - An 
auto_pause block as defined below. - auto
Scale SparkPool Auto Scale  - An 
auto_scale block as defined below. Exactly one of node_count or auto_scale must be specified. - cache
Size number - The cache size in the Spark Pool.
 - compute
Isolation booleanEnabled  - Indicates whether compute isolation is enabled or not. Defaults to 
false. - dynamic
Executor booleanAllocation Enabled  - library
Requirement SparkPool Library Requirement  - max
Executors number - min
Executors number - name string
 - The name which should be used for this Synapse Spark Pool. Changing this forces a new Synapse Spark Pool to be created.
 - node
Count number - The number of nodes in the Spark Pool. Exactly one of 
node_count or auto_scale must be specified. - node
Size string - The level of node in the Spark Pool. Possible values are 
Small, Medium, Large, None, XLarge, XXLarge and XXXLarge. - node
Size stringFamily  - The kind of nodes that the Spark Pool provides. Possible values are 
HardwareAcceleratedFPGA, HardwareAcceleratedGPU, MemoryOptimized, and None. - session
Level booleanPackages Enabled  - spark
Config SparkPool Spark Config  - spark
Events stringFolder  - spark
Log stringFolder  - spark
Version string - synapse
Workspace stringId  - The ID of the Synapse Workspace where the Synapse Spark Pool should exist. Changing this forces a new Synapse Spark Pool to be created.
 - {[key: string]: string}
 
- auto_
pause SparkPool Auto Pause Args  - An 
auto_pause block as defined below. - auto_
scale SparkPool Auto Scale Args  - An 
auto_scale block as defined below. Exactly one of node_count or auto_scale must be specified. - cache_
size int - The cache size in the Spark Pool.
 - compute_
isolation_ boolenabled  - Indicates whether compute isolation is enabled or not. Defaults to 
false. - dynamic_
executor_ boolallocation_ enabled  - library_
requirement SparkPool Library Requirement Args  - max_
executors int - min_
executors int - name str
 - The name which should be used for this Synapse Spark Pool. Changing this forces a new Synapse Spark Pool to be created.
 - node_
count int - The number of nodes in the Spark Pool. Exactly one of 
node_count or auto_scale must be specified. - node_
size str - The level of node in the Spark Pool. Possible values are 
Small, Medium, Large, None, XLarge, XXLarge and XXXLarge. - node_
size_ strfamily  - The kind of nodes that the Spark Pool provides. Possible values are 
HardwareAcceleratedFPGA, HardwareAcceleratedGPU, MemoryOptimized, and None. - session_
level_ boolpackages_ enabled  - spark_
config SparkPool Spark Config Args  - spark_
events_ strfolder  - spark_
log_ strfolder  - spark_
version str - synapse_
workspace_ strid  - The ID of the Synapse Workspace where the Synapse Spark Pool should exist. Changing this forces a new Synapse Spark Pool to be created.
 - Mapping[str, str]
 
- auto
Pause Property Map - An 
auto_pause block as defined below. - auto
Scale Property Map - An 
auto_scale block as defined below. Exactly one of node_count or auto_scale must be specified. - cache
Size Number - The cache size in the Spark Pool.
 - compute
Isolation BooleanEnabled  - Indicates whether compute isolation is enabled or not. Defaults to 
false. - dynamic
Executor BooleanAllocation Enabled  - library
Requirement Property Map - max
Executors Number - min
Executors Number - name String
 - The name which should be used for this Synapse Spark Pool. Changing this forces a new Synapse Spark Pool to be created.
 - node
Count Number - The number of nodes in the Spark Pool. Exactly one of 
node_count or auto_scale must be specified. - node
Size String - The level of node in the Spark Pool. Possible values are 
Small, Medium, Large, None, XLarge, XXLarge and XXXLarge. - node
Size StringFamily  - The kind of nodes that the Spark Pool provides. Possible values are 
HardwareAcceleratedFPGA, HardwareAcceleratedGPU, MemoryOptimized, and None. - session
Level BooleanPackages Enabled  - spark
Config Property Map - spark
Events StringFolder  - spark
Log StringFolder  - spark
Version String - synapse
Workspace StringId  - The ID of the Synapse Workspace where the Synapse Spark Pool should exist. Changing this forces a new Synapse Spark Pool to be created.
 - Map<String>
 
Supporting Types
SparkPoolAutoPause, SparkPoolAutoPauseArgs        
- Delay
In intMinutes  - Number of minutes of idle time before the Spark Pool is automatically paused. Must be between 
5 and 10080. 
- Delay
In intMinutes  - Number of minutes of idle time before the Spark Pool is automatically paused. Must be between 
5 and 10080. 
- delay
In IntegerMinutes  - Number of minutes of idle time before the Spark Pool is automatically paused. Must be between 
5 and 10080. 
- delay
In numberMinutes  - Number of minutes of idle time before the Spark Pool is automatically paused. Must be between 
5 and 10080. 
- delay_
in_ intminutes  - Number of minutes of idle time before the Spark Pool is automatically paused. Must be between 
5 and 10080. 
- delay
In NumberMinutes  - Number of minutes of idle time before the Spark Pool is automatically paused. Must be between 
5 and 10080. 
SparkPoolAutoScale, SparkPoolAutoScaleArgs        
- Max
Node intCount  - The maximum number of nodes the Spark Pool can support. Must be between 
3 and 200. - Min
Node intCount  - The minimum number of nodes the Spark Pool can support. Must be between 
3 and 200. 
- Max
Node intCount  - The maximum number of nodes the Spark Pool can support. Must be between 
3 and 200. - Min
Node intCount  - The minimum number of nodes the Spark Pool can support. Must be between 
3 and 200. 
- max
Node IntegerCount  - The maximum number of nodes the Spark Pool can support. Must be between 
3 and 200. - min
Node IntegerCount  - The minimum number of nodes the Spark Pool can support. Must be between 
3 and 200. 
- max
Node numberCount  - The maximum number of nodes the Spark Pool can support. Must be between 
3 and 200. - min
Node numberCount  - The minimum number of nodes the Spark Pool can support. Must be between 
3 and 200. 
- max_
node_ intcount  - The maximum number of nodes the Spark Pool can support. Must be between 
3 and 200. - min_
node_ intcount  - The minimum number of nodes the Spark Pool can support. Must be between 
3 and 200. 
- max
Node NumberCount  - The maximum number of nodes the Spark Pool can support. Must be between 
3 and 200. - min
Node NumberCount  - The minimum number of nodes the Spark Pool can support. Must be between 
3 and 200. 
SparkPoolLibraryRequirement, SparkPoolLibraryRequirementArgs        
SparkPoolSparkConfig, SparkPoolSparkConfigArgs        
Import
Synapse Spark Pool can be imported using the resource id, e.g.
$ pulumi import azure:synapse/sparkPool:SparkPool example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Synapse/workspaces/workspace1/bigDataPools/sparkPool1
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
 - Azure Classic pulumi/pulumi-azure
 - License
 - Apache-2.0
 - Notes
 - This Pulumi package is based on the 
azurermTerraform Provider.