Terraform module to deploy Trend Micro Vision One File Security via Helm with simple syntax and minimal configuration.
For detailed Helm chart configuration, values, and advanced options, see the official Helm chart documentation:
https://trendmicro.github.io/visionone-file-security-helm/
module "v1fs" {
source = "../../modules/v1fs"
release_name = "v1fs"
chart_version = "1.4.0"
namespace = "visionone-filesecurity"
registration_token = var.registration_token
# NGINX Ingress
ingress_class_name = "nginx"
domain_name = "scanner.local.k8s"
# Scanner Configuration
scanner_replicas = 1
scanner_cpu_request = "800m"
scanner_memory_request = "2Gi"
}module "v1fs" {
source = "../../modules/v1fs"
release_name = "v1fs"
chart_version = "1.4.0"
namespace = "visionone-filesecurity"
registration_token = var.registration_token
# AWS ALB Ingress
ingress_class_name = "alb"
domain_name = "scanner.example.com"
alb_certificate_arn = "arn:aws:acm:us-east-1:123456789012:certificate/xxx"
alb_scheme = "internet-facing"
# Scanner Configuration
scanner_replicas = 2
scanner_cpu_request = "800m"
scanner_memory_request = "2Gi"
enable_scanner_autoscaling = true
scanner_autoscaling_min_replicas = 2
scanner_autoscaling_max_replicas = 10
}module "v1fs" {
source = "../../modules/v1fs"
release_name = "v1fs"
chart_version = "1.4.0"
namespace = "visionone-filesecurity"
registration_token = var.registration_token
ingress_class_name = "alb"
domain_name = "scanner.example.com"
alb_certificate_arn = var.certificate_arn
# Enable Management Service
enable_management = true
management_plugins = [
{
name = "ontap-agent"
enabled = true
configMapName = "ontap-agent-config"
securitySecretName = "ontap-agent-security"
jwtSecretName = "ontap-agent-jwt"
}
]
# Enable Database for Management
enable_management_db = true
create_database_storage_class = false
database_storage_class_name = "gp3"
}module "v1fs" {
source = "../../modules/v1fs"
release_name = "v1fs"
namespace = "visionone-filesecurity"
registration_token = var.registration_token
# Use local chart instead of remote repository
chart_path = "../../../v1fs-helm/amaas-helm/visionone-filesecurity"
ingress_class_name = "nginx"
domain_name = "scanner.local.k8s"
}To upgrade Vision One File Security to a newer version, simply update the chart_version in your terraform.tfvars or module configuration:
```hcl
# Before
chart_version = "1.4.0"

# After
chart_version = "1.5.0"
```

Then apply the changes:
```bash
terraform plan   # Review the upgrade changes
terraform apply  # Execute the upgrade
```

Terraform will detect the version change and trigger a Helm release upgrade. Helm handles the rolling update of pods automatically, ensuring minimal downtime.
Note: Always review the Helm Chart Release Notes before upgrading to check for breaking changes or migration steps.
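After applying, you can confirm the upgrade with Helm (a quick check, assuming the release name and namespace used in the examples above):

```bash
# Show the deployed chart version and revision history
helm list -n visionone-filesecurity
helm history v1fs -n visionone-filesecurity
```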
Complete, ready-to-use examples are available in the examples/ directory:
| Example | Description | Directory |
|---|---|---|
| Local Kubernetes | Deploy to Minikube, Kind, Docker Desktop, or Colima with NGINX Ingress | examples/local/ |
| AWS EKS | Full infrastructure including EKS cluster, VPC, ALB Controller, and V1FS | examples/aws/ |
Local Kubernetes:
```bash
cd examples/local
cp terraform.tfvars.example terraform.tfvars
# Edit terraform.tfvars with your registration token
terraform init
terraform apply
```

AWS EKS:
```bash
cd examples/aws
cp terraform.tfvars.example terraform.tfvars
# Edit terraform.tfvars with your AWS and V1FS settings
terraform init
terraform apply
```

Prerequisites:

| Name | Version |
|---|---|
| Terraform | >= 1.5.0 |
| Kubernetes | >= 1.24 |
| Helm | >= 3.0 |
| kubectl | configured for your cluster |
Provider version constraints:

| Name | Version |
|---|---|
| helm | ~> 2.0 |
| kubernetes | ~> 2.0 |
Providers used by this module:

| Name | Version |
|---|---|
| helm | 3.1.1 |
| kubernetes | 2.38.0 |
| null | 3.2.4 |
No modules.
Resources created by this module:

| Name | Type |
|---|---|
| helm_release.v1fs | resource |
| kubernetes_namespace.v1fs | resource |
| kubernetes_secret.device_token | resource |
| kubernetes_secret.token | resource |
| null_resource.cleanup_database_pvc | resource |
Inputs:

| Name | Description | Type | Default | Required |
|---|---|---|---|---|
| alb_certificate_arn | ACM certificate ARN for HTTPS (required for AWS ALB, optional for other ingress controllers) | string | "" | no |
| alb_scheme | ALB scheme: internet-facing or internal | string | "internet-facing" | no |
| backend_communicator_cpu_request | CPU request for backend communicator pods | string | "250m" | no |
| backend_communicator_memory_request | Memory request for backend communicator pods | string | "128Mi" | no |
| chart_name | Helm chart name when using remote repository. Ignored when chart_path is set. | string | "visionone-filesecurity" | no |
| chart_path | Local path to Helm chart directory. When set, chart_repository and chart_version are ignored. Use for development or on-premise deployments with local charts. Path is relative to the Terraform root module (where you call this module), e.g. "../../../v1fs-helm/amaas-helm/visionone-filesecurity" | string | null | no |
| chart_repository | Helm chart repository URL | string | "https://trendmicro.github.io/visionone-file-security-helm/" | no |
| chart_version | Helm chart version. Required when using remote repository (chart_path is null). | string | null | no |
| create_database_storage_class | Whether to create a StorageClass for database persistence. Set to false when using cloud provider storage classes (e.g., EBS gp3). | bool | true | no |
| create_namespace | Whether to create the namespace. Set to false if the namespace already exists (e.g., using 'default' or a pre-existing namespace). | bool | true | no |
| database_cpu_limit | CPU limit for database container pods | string | "500m" | no |
| database_cpu_request | CPU request for database container pods | string | "250m" | no |
| database_memory_limit | Memory limit for database container pods | string | "1Gi" | no |
| database_memory_request | Memory request for database container pods | string | "512Mi" | no |
| database_persistence_size | Size of persistent volume for database (e.g., '10Gi', '100Gi') | string | "100Gi" | no |
| database_storage_class_host_path | Host path for local StorageClass (only used when create_database_storage_class = true) | string | "/mnt/data/postgres" | no |
| database_storage_class_name | StorageClass name for database persistence. Use 'gp3' or 'gp2' for AWS EBS, or a custom name for local/hostPath. | string | "visionone-filesecurity-storage" | no |
| database_storage_class_reclaim_policy | Reclaim policy for the StorageClass: Delete or Retain | string | "Retain" | no |
| domain_name | Domain name for ingress | string | n/a | yes |
| enable_icap | [NOT PRODUCTION READY] Enable ICAP service with NLB. Currently only the gRPC protocol is supported and stable; ICAP support is under development and should remain disabled. Use gRPC for production deployments. | bool | false | no |
| enable_management | Whether to enable management service | bool | false | no |
| enable_management_db | Whether to enable PostgreSQL database container for management service. Requires enable_management = true. | bool | false | no |
| enable_scan_cache | Enable scan result caching | bool | true | no |
| enable_scanner_autoscaling | Whether to enable autoscaling for scanner | bool | false | no |
| extra_helm_values | Additional Helm values as a list of YAML strings. Applied after module defaults; later entries override earlier ones using Helm's deep merge strategy. Useful for injecting values from files or complex configurations not covered by module variables (see the expanded example below). | list(string) | [] | no |
| icap_certificate_arn | ACM certificate ARN for ICAP TLS (optional) | string | "" | no |
| icap_nlb_scheme | NLB scheme for ICAP: internet-facing or internal | string | "internet-facing" | no |
| icap_port | ICAP service port | number | 1344 | no |
| image_pull_secrets | List of Kubernetes secret names for pulling images from private registries. Applied globally to all V1FS components (scanner, scanCache, backendCommunicator, managementService, databaseContainer). Example: ["my-registry-secret"] | list(string) | [] | no |
| ingress_class_name | Ingress class name (e.g., 'alb', 'nginx', 'gce') | string | "alb" | no |
| log_level | Log level for V1FS services | string | "INFO" | no |
| management_cpu_request | CPU request for management service pods | string | "250m" | no |
| management_extra_ingress_annotations | Additional annotations for management ingress (merged with defaults) | map(string) | {} | no |
| management_memory_request | Memory request for management service pods | string | "256Mi" | no |
| management_plugins | Management service plugins configuration. Each plugin requires all fields: name (plugin identifier, e.g., "ontap-agent"), enabled, configMapName, securitySecretName, and jwtSecretName (see the management example above). | list(map(any)) | [] | no |
| management_websocket_prefix | WebSocket path prefix for the management service | string | "/ontap" | no |
| namespace | Kubernetes namespace for V1FS deployment | string | "visionone-filesecurity" | no |
| no_proxy | Comma-separated no_proxy list for all V1FS components | string | "localhost,127.0.0.1,.svc.cluster.local" | no |
| proxy_url | HTTP/HTTPS proxy URL | string | "" | no |
| registration_token | Vision One registration token | string | n/a | yes |
| release_name | Helm release name | string | n/a | yes |
| scan_cache_cpu_request | CPU request for scan cache pods | string | "250m" | no |
| scan_cache_memory_request | Memory request for scan cache pods | string | "512Mi" | no |
| scanner_autoscaling_max_replicas | Maximum replicas for scanner autoscaling | number | 10 | no |
| scanner_autoscaling_min_replicas | Minimum replicas for scanner autoscaling | number | 1 | no |
| scanner_config_map_name | ConfigMap name for scanner configuration | string | "scanner-config" | no |
| scanner_cpu_request | CPU request for scanner pods | string | "800m" | no |
| scanner_extra_ingress_annotations | Additional annotations for scanner ingress (merged with defaults) | map(string) | {} | no |
| scanner_memory_request | Memory request for scanner pods | string | "2Gi" | no |
| scanner_replicas | Number of scanner replicas | number | 1 | no |
Outputs:

| Name | Description |
|---|---|
| chart_source | Helm chart source (local path or repository URL) |
| chart_version | Helm chart version |
| database_enabled | Whether PostgreSQL database is enabled for management service |
| device_token_secret_name | Name of the device token secret |
| icap_enabled | Whether ICAP service is enabled |
| is_local_chart | Whether using local Helm chart |
| management_enabled | Whether management service is enabled |
| management_endpoint | Management service endpoint |
| namespace | Kubernetes namespace name |
| release_name | Helm release name |
| release_version | Deployed Helm release version |
| scanner_endpoint | Scanner service endpoint |
| token_secret_name | Name of the token secret |
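A minimal sketch of consuming these outputs from the calling configuration (assuming the module is instantiated as module "v1fs" as in the examples above):

```hcl
# Re-export selected module outputs from the root module
output "v1fs_scanner_endpoint" {
  description = "Scanner service endpoint exposed by the V1FS module"
  value       = module.v1fs.scanner_endpoint
}

output "v1fs_namespace" {
  value = module.v1fs.namespace
}
```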
To obtain a registration token:

- Log in to Vision One Console
- Navigate to: File Security → Containerized Scanner
- Click Add Scanner or Get Registration Token
- Copy the token (starts with `eyJ...`)
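The token can then be passed to Terraform without hard-coding it, for example via terraform.tfvars or Terraform's standard TF_VAR_ environment variable (variable name as used in the examples above):

```bash
# Option 1: terraform.tfvars (keep this file out of version control)
#   registration_token = "eyJ..."

# Option 2: environment variable picked up automatically by Terraform
export TF_VAR_registration_token="eyJ..."
```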
Notes:

- ICAP Protocol: Currently NOT supported. Use gRPC only.
- Certificate: For AWS ALB, the certificate must be in `Issued` status before deployment.
- Region: The ACM certificate must be in the same region as your EKS cluster.
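A quick way to check the certificate status before deploying (illustrative; substitute your own certificate ARN):

```bash
aws acm describe-certificate \
  --certificate-arn arn:aws:acm:us-east-1:123456789012:certificate/xxx \
  --query 'Certificate.Status' \
  --output text   # expect: ISSUED
```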
Please read CONTRIBUTING.md for details on how to contribute to this project.
This project has adopted the Contributor Covenant Code of Conduct. Please read it to understand the expectations for participation in this community.
This project is licensed under the Apache License 2.0 - see the LICENSE file for details.